Merge tag 'wireless-drivers-next-for-davem-2015-02-07' of git://git.kernel.org/pub...
authorDavid S. Miller <davem@davemloft.net>
Mon, 9 Feb 2015 20:07:20 +0000 (12:07 -0800)
committerDavid S. Miller <davem@davemloft.net>
Mon, 9 Feb 2015 20:13:58 +0000 (12:13 -0800)
Major changes:

iwlwifi:

* more work for new devices (4165 / 8260)
* cleanups / improvemnts in rate control
* fixes for TDLS
* major statistics work from Johannes - more to come
* improvements for the fw error dump infrastructure
* usual amount of small fixes here and there (scan, D0i3 etc...)
* add support for beamforming
* enable stuck queue detection for iwlmvm
* a few fixes for EBS scan
* fixes for various failure paths
* improvements for TDLS Offchannel

wil6210:

* performance tuning
* some AP features

brcm80211:

* rework some code in SDIO part of the brcmfmac driver related to
  suspend/resume that were found doing stress testing
* in PCIe part scheduling of worker thread needed to be relaxed
* minor fixes and exposing firmware revision information to
  user-space, ie. ethtool.

mwifiex:

* enhancements for change virtual interface handling
* remove coupling between netdev and FW supported interface
  combination, now conversion from any type of supported interface
  types to any other type is possible
* DFS support in AP mode

ath9k:

* fix calibration issues on some boards
* Wake-on-WLAN improvements

ath10k:

* add support for qca6174 hardware
* enable RX batching to reduce CPU load

Conflicts:
drivers/net/wireless/rtlwifi/pci.c

Conflict resolution is to get rid of the 'end' label and keep
the rest.

Signed-off-by: David S. Miller <davem@davemloft.net>
1674 files changed:
.mailmap
Documentation/ABI/testing/sysfs-class-mei
Documentation/ABI/testing/sysfs-platform-dell-laptop [deleted file]
Documentation/DocBook/80211.tmpl
Documentation/devicetree/bindings/arm/arm-boards
Documentation/devicetree/bindings/arm/fw-cfg.txt [new file with mode: 0644]
Documentation/devicetree/bindings/graph.txt
Documentation/devicetree/bindings/i2c/i2c-st.txt
Documentation/devicetree/bindings/i2c/trivial-devices.txt
Documentation/devicetree/bindings/input/gpio-keys.txt
Documentation/devicetree/bindings/input/stmpe-keypad.txt
Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
Documentation/devicetree/bindings/net/davicom-dm9000.txt
Documentation/devicetree/bindings/net/davinci_emac.txt
Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/keystone-netcp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/st21nfca.txt
Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
Documentation/devicetree/bindings/net/rockchip-dwmac.txt
Documentation/devicetree/bindings/net/sti-dwmac.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/phy/phy-miphy365x.txt
Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/kernel-parameters.txt
Documentation/networking/filter.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/netlink_mmap.txt
Documentation/networking/nf_conntrack-sysctl.txt
Documentation/networking/openvswitch.txt
Documentation/networking/timestamping.txt
Documentation/networking/timestamping/txtimestamp.c
Documentation/sysctl/net.txt
Documentation/target/tcm_mod_builder.py
Documentation/thermal/cpu-cooling-api.txt
MAINTAINERS
Makefile
arch/alpha/kernel/pci.c
arch/alpha/mm/fault.c
arch/arc/mm/fault.c
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/am3517.dtsi
arch/arm/boot/dts/armada-370-db.dts
arch/arm/boot/dts/at91sam9263.dtsi
arch/arm/boot/dts/berlin2q-marvell-dmp.dts
arch/arm/boot/dts/berlin2q.dtsi
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/exynos5420-arndale-octa.dts
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
arch/arm/boot/dts/imx6qdl-sabresd.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/boot/dts/ls1021a.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/rk3288-evb-rk808.dts
arch/arm/boot/dts/rk3288-evb.dtsi
arch/arm/boot/dts/sama5d3xmb.dtsi
arch/arm/boot/dts/sama5d4.dtsi
arch/arm/boot/dts/ste-nomadik-nhk15.dts
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/boot/dts/stih410.dtsi
arch/arm/boot/dts/stih415.dtsi
arch/arm/boot/dts/stih416.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
arch/arm/boot/dts/sun5i-a13-olinuxino.dts
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20-bananapi.dts
arch/arm/boot/dts/sun7i-a20-hummingbird.dts
arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
arch/arm/boot/dts/sun8i-a23.dtsi
arch/arm/boot/dts/sun9i-a80-optimus.dts
arch/arm/boot/dts/sun9i-a80.dtsi
arch/arm/boot/dts/tegra20-seaboard.dts
arch/arm/boot/dts/vf610-twr.dts
arch/arm/configs/exynos_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/kvm_emulate.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/entry-v7m.S
arch/arm/kernel/perf_event.c
arch/arm/kernel/perf_regs.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/kvm/arm.c
arch/arm/kvm/coproc.c
arch/arm/kvm/coproc.h
arch/arm/kvm/coproc_a15.c
arch/arm/kvm/coproc_a7.c
arch/arm/kvm/mmu.c
arch/arm/kvm/trace.h
arch/arm/mach-at91/board-dt-sama5.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/clk-imx6sx.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/mach-imx6sx.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/control.h
arch/arm/mach-omap2/omap-headsmp.S
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_hwmod_54xx_data.c
arch/arm/mach-omap2/prcm-common.h
arch/arm/mach-omap2/prm44xx.c
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-rockchip/rockchip.c
arch/arm/mach-shmobile/board-ape6evm.c
arch/arm/mach-shmobile/board-lager.c
arch/arm/mach-shmobile/setup-r8a7740.c
arch/arm/mach-shmobile/setup-r8a7778.c
arch/arm/mach-shmobile/setup-r8a7779.c
arch/arm/mach-shmobile/setup-rcar-gen2.c
arch/arm/mach-shmobile/setup-sh73a0.c
arch/arm/mach-shmobile/timer.c
arch/arm/mm/Kconfig
arch/arm/mm/context.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/dump.c
arch/arm/mm/init.c
arch/arm/mm/mmu.c
arch/arm64/Makefile
arch/arm64/boot/dts/Makefile
arch/arm64/boot/dts/arm/juno.dts
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/module.c
arch/arm64/kernel/perf_regs.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kvm/hyp.S
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/dump.c
arch/arm64/mm/init.c
arch/avr32/kernel/module.c
arch/avr32/mm/fault.c
arch/blackfin/mach-bf533/boards/stamp.c
arch/cris/arch-v32/drivers/sync_serial.c
arch/cris/kernel/module.c
arch/cris/mm/fault.c
arch/frv/mb93090-mb00/pci-frv.c
arch/frv/mm/fault.c
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/acpi.c
arch/ia64/kernel/entry.S
arch/ia64/kernel/module.c
arch/ia64/mm/fault.c
arch/ia64/pci/pci.c
arch/m32r/mm/fault.c
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/m68k/mm/fault.c
arch/metag/mm/fault.c
arch/microblaze/mm/fault.c
arch/microblaze/pci/pci-common.c
arch/mips/mm/fault.c
arch/mips/net/bpf_jit.c
arch/mn10300/mm/fault.c
arch/mn10300/unit-asb2305/pci-asb2305.c
arch/mn10300/unit-asb2305/pci.c
arch/nios2/kernel/cpuinfo.c
arch/nios2/kernel/entry.S
arch/nios2/kernel/module.c
arch/nios2/kernel/signal.c
arch/nios2/mm/fault.c
arch/openrisc/mm/fault.c
arch/parisc/kernel/module.c
arch/parisc/mm/fault.c
arch/powerpc/crypto/sha1.c
arch/powerpc/include/asm/kexec.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/smp.c
arch/powerpc/mm/copro_fault.c
arch/powerpc/mm/fault.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/xmon/xmon.c
arch/s390/hypfs/hypfs_vm.c
arch/s390/include/asm/irqflags.h
arch/s390/include/asm/timex.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/module.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/uprobes.c
arch/s390/kernel/vtime.c
arch/s390/mm/fault.c
arch/s390/mm/pgtable.c
arch/s390/net/bpf_jit.S
arch/s390/net/bpf_jit_comp.c
arch/score/mm/fault.c
arch/sh/mm/fault.c
arch/sparc/kernel/pci.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/net/bpf_jit_comp.c
arch/tile/kernel/module.c
arch/tile/mm/fault.c
arch/um/Kconfig.common
arch/um/kernel/trap.c
arch/x86/Kconfig
arch/x86/boot/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/crypto/Makefile
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
arch/x86/crypto/sha-mb/sha1_mb.c
arch/x86/include/asm/acpi.h
arch/x86/include/asm/desc.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/vgtod.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/mkcapflags.sh
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/irq.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/perf_regs.c
arch/x86/kernel/tls.c
arch/x86/kernel/tsc.c
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/lib/insn.c
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/mpx.c
arch/x86/mm/pat.c
arch/x86/pci/common.c
arch/x86/pci/i386.c
arch/x86/pci/xen.c
arch/x86/tools/calc_run_size.pl [deleted file]
arch/x86/tools/calc_run_size.sh [new file with mode: 0644]
arch/x86/um/sys_call_table_32.c
arch/x86/um/sys_call_table_64.c
arch/x86/vdso/vma.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/time.c
arch/xtensa/mm/fault.c
block/blk-core.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
block/blk-timeout.c
crypto/aes_generic.c
crypto/af_alg.c
crypto/algif_hash.c
crypto/algif_skcipher.c
crypto/ansi_cprng.c
crypto/blowfish_generic.c
crypto/camellia_generic.c
crypto/cast5_generic.c
crypto/cast6_generic.c
crypto/crc32c_generic.c
crypto/crct10dif_generic.c
crypto/des_generic.c
crypto/ghash-generic.c
crypto/krng.c
crypto/salsa20_generic.c
crypto/serpent_generic.c
crypto/sha1_generic.c
crypto/sha256_generic.c
crypto/sha512_generic.c
crypto/tea.c
crypto/tgr192.c
crypto/twofish_generic.c
crypto/wp512.c
drivers/Kconfig
drivers/Makefile
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_processor.c
drivers/acpi/device_pm.c
drivers/acpi/event.c
drivers/acpi/int340x_thermal.c
drivers/acpi/pci_irq.c
drivers/acpi/processor_core.c
drivers/acpi/scan.c
drivers/acpi/video.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci_xgene.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata-sff.c
drivers/ata/sata_dwc_460ex.c
drivers/ata/sata_sil24.c
drivers/atm/eni.c
drivers/atm/fore200e.c
drivers/atm/he.c
drivers/atm/he.h
drivers/atm/horizon.c
drivers/atm/idt77252.c
drivers/atm/iphase.c
drivers/atm/lanai.c
drivers/atm/nicstar.c
drivers/atm/solos-pci.c
drivers/atm/zatm.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btusb.c
drivers/bus/arm-cci.c
drivers/bus/mvebu-mbus.c
drivers/char/ipmi/ipmi_ssif.c
drivers/clk/at91/clk-slow.c
drivers/clk/berlin/bg2q.c
drivers/clk/clk-ppc-corenet.c
drivers/clk/clk.c
drivers/clk/rockchip/clk-cpu.c
drivers/clk/rockchip/clk-rk3188.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/bcm_kona_timer.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/sh_tmu.c
drivers/dma/dw/core.c
drivers/dma/dw/platform.c
drivers/gpio/gpio-crystalcove.c
drivers/gpio/gpio-dln2.c
drivers/gpio/gpio-grgpio.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpio/gpiolib.c
drivers/gpio/gpiolib.h
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdkfd/Makefile
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c [deleted file]
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/nouveau/core/core/event.c
drivers/gpu/drm/nouveau/core/core/notify.c
drivers/gpu/drm/nouveau/core/engine/device/nve0.c
drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/dce3_1_afmt.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/ni_dma.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_benchmark.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kfd.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-kye.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/i5500_temp.c [new file with mode: 0644]
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-slave-eeprom.c
drivers/iio/adc/ad799x.c
drivers/iio/inkern.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
drivers/infiniband/hw/mlx4/ah.c
drivers/infiniband/hw/mlx4/alias_GUID.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx4/sysfs.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/gpio_keys.c
drivers/input/keyboard/hil_kbd.c
drivers/input/keyboard/stmpe-keypad.c
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/trackpoint.c
drivers/input/mouse/trackpoint.h
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/iommu/intel-iommu.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/rockchip-iommu.c
drivers/iommu/tegra-gart.c
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-hip04.c
drivers/irqchip/irq-mtk-sysirq.c
drivers/irqchip/irq-omap-intc.c
drivers/isdn/hardware/eicon/message.c
drivers/isdn/hardware/mISDN/mISDNipac.c
drivers/isdn/hardware/mISDN/w6692.c
drivers/isdn/isdnloop/isdnloop.c
drivers/isdn/sc/init.c
drivers/leds/leds-netxbig.c
drivers/mcb/mcb-internal.h
drivers/mcb/mcb-pci.c
drivers/md/bitmap.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-target.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/raid5.c
drivers/media/pci/cx23885/cx23885-cards.c
drivers/media/pci/cx23885/cx23885-core.c
drivers/media/pci/cx23885/cx23885-dvb.c
drivers/media/pci/cx23885/cx23885.h
drivers/media/platform/omap3isp/ispvideo.c
drivers/media/platform/soc_camera/atmel-isi.c
drivers/media/platform/soc_camera/mx2_camera.c
drivers/media/platform/soc_camera/mx3_camera.c
drivers/media/platform/soc_camera/omap1_camera.c
drivers/media/platform/soc_camera/pxa_camera.c
drivers/media/platform/soc_camera/rcar_vin.c
drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
drivers/media/usb/dvb-usb/cxusb.c
drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/mfd/da9052-core.c
drivers/mfd/rtsx_usb.c
drivers/mfd/stmpe.c
drivers/mfd/stmpe.h
drivers/mfd/tps65218.c
drivers/misc/cxl/context.c
drivers/misc/cxl/file.c
drivers/misc/mei/hw-me.c
drivers/misc/vmw_vmci/vmci_queue_pair.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pci.h
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/caif/caif_hsi.c
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/cc770/cc770.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/janz-ican3.c
drivers/net/can/m_can/m_can.c
drivers/net/can/pch_can.c
drivers/net/can/rcar_can.c
drivers/net/can/softing/softing_main.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/Kconfig
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/peak_usb/Makefile
drivers/net/can/usb/peak_usb/pcan_ucan.h [new file with mode: 0644]
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_core.h
drivers/net/can/usb/peak_usb/pcan_usb_fd.c [new file with mode: 0644]
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.h
drivers/net/can/usb/usb_8dev.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2_regs.h
drivers/net/dsa/mv88e6352.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/alteon/acenic.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/nmclan_cs.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cadence/at91_ether.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/chelsio/cxgb/sge.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
drivers/net/ethernet/chelsio/cxgb4/Makefile
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4_values.h [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cirrus/Kconfig
drivers/net/ethernet/cirrus/ep93xx_eth.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_hw.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet.h
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/hisilicon/Makefile
drivers/net/ethernet/hisilicon/hip04_eth.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hip04_mdio.c [new file with mode: 0644]
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
drivers/net/ethernet/intel/fm10k/fm10k_type.h
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_osdep.h
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.h
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/regs.h
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/alloc.c
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_resources.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/icm.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/pd.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/reset.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-config.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/rocker/rocker.h
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
drivers/net/ethernet/smsc/Kconfig
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/Makefile
drivers/net/ethernet/ti/cpsw-common.c [new file with mode: 0644]
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw.h
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/netcp.h [new file with mode: 0644]
drivers/net/ethernet/ti/netcp_core.c [new file with mode: 0644]
drivers/net/ethernet/ti/netcp_ethss.c [new file with mode: 0644]
drivers/net/ethernet/ti/netcp_sgmii.c [new file with mode: 0644]
drivers/net/ethernet/ti/netcp_xgbepcsr.c [new file with mode: 0644]
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/cc2520.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/irda/ali-ircc.c
drivers/net/irda/ali-ircc.h
drivers/net/irda/au1k_ir.c
drivers/net/irda/irda-usb.c
drivers/net/irda/irda-usb.h
drivers/net/irda/kingsun-sir.c
drivers/net/irda/ks959-sir.c
drivers/net/irda/mcs7780.c
drivers/net/irda/mcs7780.h
drivers/net/irda/nsc-ircc.c
drivers/net/irda/nsc-ircc.h
drivers/net/irda/stir4200.c
drivers/net/irda/via-ircc.h
drivers/net/irda/vlsi_ir.c
drivers/net/irda/vlsi_ir.h
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/mii.c
drivers/net/phy/Kconfig
drivers/net/phy/amd-xgbe-phy.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/ppp/ppp_deflate.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/hso.c
drivers/net/usb/kaweth.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/sr9700.c
drivers/net/usb/sr9700.h
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_defs.h
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wan/Kconfig
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nfc/microread/microread.c
drivers/nfc/pn544/i2c.c
drivers/nfc/pn544/pn544.c
drivers/nfc/st21nfca/Makefile
drivers/nfc/st21nfca/i2c.c
drivers/nfc/st21nfca/st21nfca.c
drivers/nfc/st21nfca/st21nfca.h
drivers/nfc/st21nfca/st21nfca_se.c [new file with mode: 0644]
drivers/nfc/st21nfca/st21nfca_se.h [new file with mode: 0644]
drivers/nfc/st21nfcb/Makefile
drivers/nfc/st21nfcb/i2c.c
drivers/nfc/st21nfcb/ndlc.c
drivers/nfc/st21nfcb/st21nfcb.c
drivers/nfc/st21nfcb/st21nfcb.h
drivers/nfc/st21nfcb/st21nfcb_se.c [new file with mode: 0644]
drivers/nfc/st21nfcb/st21nfcb_se.h [new file with mode: 0644]
drivers/of/overlay.c
drivers/of/platform.c
drivers/of/unittest-data/tests-overlay.dtsi
drivers/of/unittest.c
drivers/parisc/lba_pci.c
drivers/pci/bus.c
drivers/pci/host/pcie-designware.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/phy/phy-miphy28lp.c
drivers/phy/phy-miphy365x.c
drivers/phy/phy-omap-control.c
drivers/phy/phy-stih407-usb.c
drivers/phy/phy-sun4i-usb.c
drivers/phy/phy-ti-pipe3.c
drivers/pinctrl/core.c
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/pinctrl-xway.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/platform/x86/dell-laptop.c
drivers/regulator/core.c
drivers/regulator/s2mps11.c
drivers/reset/reset-sunxi.c
drivers/rtc/rtc-s5m.c
drivers/s390/crypto/ap_bus.c
drivers/s390/net/claw.c
drivers/s390/net/ctcm_fsms.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/ctcm_main.h
drivers/s390/net/ctcm_sysfs.c
drivers/s390/net/lcs.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/csiostor/Makefile
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/csiostor/csio_hw.h
drivers/scsi/csiostor/csio_hw_chip.h
drivers/scsi/csiostor/csio_hw_t4.c [deleted file]
drivers/scsi/csiostor/csio_hw_t5.c
drivers/scsi/csiostor/csio_init.c
drivers/scsi/csiostor/csio_isr.c
drivers/scsi/csiostor/csio_lnode.c
drivers/scsi/csiostor/csio_mb.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/csiostor/csio_wr.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/device_handler/scsi_dh.c
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/pmcraid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-imx.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-sh-msiof.c
drivers/staging/lustre/lustre/llite/vvp_io.c
drivers/staging/media/tlg2300/Kconfig
drivers/staging/nvec/nvec.c
drivers/staging/vt6655/baseband.c
drivers/staging/vt6655/channel.c
drivers/staging/vt6655/device_main.c
drivers/staging/vt6655/rxtx.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_user.c
drivers/thermal/imx_thermal.c
drivers/thermal/int340x_thermal/acpi_thermal_rel.c
drivers/thermal/int340x_thermal/processor_thermal_device.c
drivers/thermal/of-thermal.c
drivers/thermal/rcar_thermal.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_core.h
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/samsung.c
drivers/tty/serial/serial_core.c
drivers/tty/tty_io.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/host.c
drivers/usb/core/otg_whitelist.h
drivers/usb/core/quirks.c
drivers/usb/dwc2/core_intr.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/function/f_uac1.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/bdc/bdc_ep.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/musb/Kconfig
drivers/usb/musb/blackfin.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/musb/musb_debugfs.c
drivers/usb/musb/musb_host.c
drivers/usb/phy/phy-mv-usb.c
drivers/usb/phy/phy.c
drivers/usb/serial/console.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/generic.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/unusual_uas.h
drivers/vfio/pci/vfio_pci.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/vhost.c
drivers/video/fbdev/broadsheetfb.c
drivers/video/fbdev/core/fb_defio.c
drivers/video/fbdev/omap2/dss/hdmi_pll.c
drivers/video/fbdev/omap2/dss/pll.c
drivers/video/fbdev/omap2/dss/sdi.c
drivers/video/fbdev/simplefb.c
drivers/video/logo/logo.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/watchdog/cadence_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/meson_wdt.c
fs/afs/rxrpc.c
fs/btrfs/backref.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/ceph/addr.c
fs/cifs/cifs_debug.c
fs/cifs/file.c
fs/cifs/ioctl.c
fs/cifs/smbencrypt.c
fs/dlm/netlink.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/resize.c
fs/ext4/super.c
fs/fcntl.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/quota.c
fs/kernfs/dir.c
fs/lockd/svc.c
fs/locks.c
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfsd/nfs4state.c
fs/notify/fanotify/fanotify_user.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/namei.c
fs/quota/dquot.c
fs/quota/quota.c
fs/udf/file.c
fs/xfs/xfs_qm.h
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quotaops.c
include/acpi/processor.h
include/asm-generic/tlb.h
include/crypto/if_alg.h
include/dt-bindings/interrupt-controller/arm-gic.h
include/linux/acpi.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/ceph/osd_client.h
include/linux/compiler.h
include/linux/fs.h
include/linux/genetlink.h
include/linux/i2c.h
include/linux/ieee80211.h
include/linux/if_bridge.h
include/linux/if_vlan.h
include/linux/ipv6.h
include/linux/kdb.h
include/linux/kernel.h
include/linux/libata.h
include/linux/list_nulls.h
include/linux/mfd/samsung/s2mps13.h
include/linux/mfd/stmpe.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/driver.h
include/linux/mlx4/qp.h
include/linux/mm.h
include/linux/mmc/sdhci.h
include/linux/module.h
include/linux/moduleloader.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/nfs_fs_sb.h
include/linux/oom.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/perf_regs.h
include/linux/phy.h
include/linux/phy/omap_control_phy.h
include/linux/platform_data/st21nfca.h
include/linux/platform_data/st21nfcb.h
include/linux/printk.h
include/linux/quota.h
include/linux/quotaops.h
include/linux/rhashtable.h
include/linux/rmap.h
include/linux/skbuff.h
include/linux/socket.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
include/linux/spinlock_api_up.h
include/linux/tcp.h
include/linux/time.h
include/linux/udp.h
include/linux/uio.h
include/linux/vmw_vmci_api.h
include/linux/writeback.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/mgmt.h
include/net/bond_3ad.h
include/net/bonding.h
include/net/cfg80211.h
include/net/flow_keys.h
include/net/genetlink.h
include/net/geneve.h
include/net/gro_cells.h
include/net/inet_connection_sock.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip6_tunnel.h
include/net/ip_tunnels.h
include/net/ipv6.h
include/net/mac80211.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_tables.h
include/net/netlink.h
include/net/netns/ipv4.h
include/net/nfc/hci.h
include/net/nfc/nci.h
include/net/nfc/nci_core.h
include/net/nfc/nfc.h
include/net/ping.h
include/net/pkt_sched.h
include/net/route.h
include/net/rtnetlink.h
include/net/sch_generic.h
include/net/sock.h
include/net/switchdev.h
include/net/tc_act/tc_bpf.h [new file with mode: 0644]
include/net/tc_act/tc_connmark.h [new file with mode: 0644]
include/net/tcp.h
include/net/udp_tunnel.h
include/net/udplite.h
include/net/vxlan.h
include/sound/pcm.h
include/target/target_core_backend.h
include/target/target_core_backend_configfs.h
include/target/target_core_base.h
include/trace/events/kvm.h
include/trace/events/net.h
include/uapi/asm-generic/fcntl.h
include/uapi/linux/Kbuild
include/uapi/linux/can/netlink.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/in.h
include/uapi/linux/ipv6.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/libc-compat.h
include/uapi/linux/neighbour.h
include/uapi/linux/net_namespace.h [new file with mode: 0644]
include/uapi/linux/net_tstamp.h
include/uapi/linux/nfc.h
include/uapi/linux/nl80211.h
include/uapi/linux/openvswitch.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/snmp.h
include/uapi/linux/tc_act/Kbuild
include/uapi/linux/tc_act/tc_bpf.h [new file with mode: 0644]
include/uapi/linux/tc_act/tc_connmark.h [new file with mode: 0644]
include/uapi/linux/uinput.h
include/uapi/linux/virtio_ring.h
include/xen/interface/nmi.h [new file with mode: 0644]
include/xen/page.h
kernel/auditsc.c
kernel/bpf/core.c
kernel/bpf/syscall.c
kernel/cgroup.c
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_bp.c
kernel/debug/kdb/kdb_debugger.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/events/core.c
kernel/exit.c
kernel/kprobes.c
kernel/locking/mutex-debug.c
kernel/locking/spinlock.c
kernel/module.c
kernel/params.c
kernel/range.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sys.c
kernel/taskstats.c
kernel/time/ntp.c
kernel/time/time.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_kdb.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Kconfig.kgdb
lib/Makefile
lib/assoc_array.c
lib/checksum.c
lib/iovec.c [deleted file]
lib/rhashtable.c
lib/test_rhashtable.c [new file with mode: 0644]
mm/Kconfig.debug
mm/gup.c
mm/ksm.c
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/page-writeback.c
mm/page_alloc.c
mm/rmap.c
mm/vmscan.c
net/8021q/vlan_core.c
net/8021q/vlan_netlink.c
net/batman-adv/Kconfig
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/debugfs.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c
net/batman-adv/fragmentation.h
net/batman-adv/gateway_client.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/multicast.h
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/bnep/core.c
net/bluetooth/cmtp/capi.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_debugfs.c
net/bluetooth/hci_event.c
net/bluetooth/hci_request.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bluetooth/selftest.c
net/bluetooth/smp.c
net/bridge/br.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_mdb.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/bridge/netfilter/ebt_vlan.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nft_reject_bridge.c
net/caif/chnl_net.c
net/can/gw.c
net/ceph/auth_x.c
net/ceph/mon_client.c
net/core/Makefile
net/core/dev.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/flow_dissector.c
net/core/iovec.c [deleted file]
net/core/neighbour.c
net/core/net_namespace.c
net/core/netpoll.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sysctl_net_core.c
net/decnet/dn_dev.c
net/decnet/dn_fib.c
net/decnet/dn_route.c
net/decnet/dn_table.c
net/dsa/slave.c
net/ieee802154/6lowpan/6lowpan_i.h [new file with mode: 0644]
net/ieee802154/6lowpan/Kconfig [new file with mode: 0644]
net/ieee802154/6lowpan/Makefile [new file with mode: 0644]
net/ieee802154/6lowpan/core.c [new file with mode: 0644]
net/ieee802154/6lowpan/reassembly.c [new file with mode: 0644]
net/ieee802154/6lowpan/rx.c [new file with mode: 0644]
net/ieee802154/6lowpan/tx.c [new file with mode: 0644]
net/ieee802154/6lowpan_rtnl.c [deleted file]
net/ieee802154/Kconfig
net/ieee802154/Makefile
net/ieee802154/af802154.h [deleted file]
net/ieee802154/af_ieee802154.c [deleted file]
net/ieee802154/dgram.c [deleted file]
net/ieee802154/netlink.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/nl802154.c
net/ieee802154/raw.c [deleted file]
net/ieee802154/reassembly.c [deleted file]
net/ieee802154/reassembly.h [deleted file]
net/ieee802154/socket.c [new file with mode: 0644]
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/fib_lookup.h
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/fou.c
net/ipv4/geneve.c
net/ipv4/icmp.c
net/ipv4/inet_diag.c
net/ipv4/ip_forward.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nft_redir_ipv4.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_veno.c
net/ipv4/tcp_yeah.c
net/ipv4/udp.c
net/ipv4/udp_diag.c
net/ipv4/udp_offload.c
net/ipv4/udp_tunnel.c
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/datagram.c
net/ipv6/icmp.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_udp_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/ndisc.c
net/ipv6/netfilter/nft_redir_ipv6.c
net/ipv6/output_core.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/udp_offload.c
net/ipv6/xfrm6_policy.c
net/irda/irlap.c
net/l2tp/l2tp_netlink.c
net/llc/sysctl_net_llc.c
net/mac80211/Kconfig
net/mac80211/Makefile
net/mac80211/aes_ccm.c
net/mac80211/aes_ccm.h
net/mac80211/aes_cmac.c
net/mac80211/aes_cmac.h
net/mac80211/aes_gcm.c [new file with mode: 0644]
net/mac80211/aes_gcm.h [new file with mode: 0644]
net/mac80211/aes_gmac.c [new file with mode: 0644]
net/mac80211/aes_gmac.h [new file with mode: 0644]
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs_key.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/main.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/pm.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/wpa.c
net/mac80211/wpa.h
net/mac802154/cfg.c
net/mpls/mpls_gso.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_seqadj.c
net/netfilter/nf_log.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_hash.c
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netfilter/nft_redir.c
net/netfilter/xt_osf.c
net/netlabel/netlabel_cipso_v4.c
net/netlabel/netlabel_mgmt.c
net/netlabel/netlabel_unlabeled.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/netlink/diag.c
net/netlink/genetlink.c
net/nfc/core.c
net/nfc/hci/command.c
net/nfc/hci/core.c
net/nfc/hci/hci.h
net/nfc/hci/hcp.c
net/nfc/nci/Makefile
net/nfc/nci/core.c
net/nfc/nci/data.c
net/nfc/nci/hci.c [new file with mode: 0644]
net/nfc/nci/ntf.c
net/nfc/nci/rsp.c
net/nfc/netlink.c
net/nfc/nfc.h
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_netlink.h
net/openvswitch/flow_table.c
net/openvswitch/flow_table.h
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-vxlan.c
net/openvswitch/vport-vxlan.h [new file with mode: 0644]
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/packet/diag.c
net/phonet/pn_netlink.c
net/rds/ib_send.c
net/rds/iw_cm.c
net/rds/iw_send.c
net/rds/message.c
net/rds/sysctl.c
net/rfkill/rfkill-gpio.c
net/rxrpc/ar-error.c
net/rxrpc/ar-output.c
net/sched/Kconfig
net/sched/Makefile
net/sched/act_bpf.c [new file with mode: 0644]
net/sched/act_connmark.c [new file with mode: 0644]
net/sched/act_csum.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_bpf.c
net/sched/cls_flow.c
net/sched/em_ipset.c
net/sched/em_meta.c
net/sched/sch_api.c
net/sched/sch_dsmark.c
net/sched/sch_fq.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/socket.c
net/sunrpc/xdr.c
net/switchdev/switchdev.c
net/tipc/Kconfig
net/tipc/addr.c
net/tipc/addr.h
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/config.c
net/tipc/config.h
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/discover.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/net.c
net/tipc/net.h
net/tipc/netlink.c
net/tipc/netlink.h
net/tipc/node.c
net/tipc/node.h
net/tipc/server.c
net/tipc/server.h
net/tipc/socket.c
net/tipc/socket.h
net/tipc/subscr.c
net/tipc/subscr.h
net/unix/af_unix.c
net/unix/diag.c
net/vmw_vsock/vmci_transport.c
net/wireless/Kconfig
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/util.c
net/xfrm/xfrm_algo.c
net/xfrm/xfrm_user.c
samples/bpf/test_maps.c
scripts/Makefile.clean
scripts/recordmcount.pl
security/keys/gc.c
sound/core/seq/seq_dummy.c
sound/firewire/amdtp.c
sound/firewire/amdtp.h
sound/firewire/bebob/bebob_stream.c
sound/firewire/fireworks/fireworks_stream.c
sound/firewire/fireworks/fireworks_transaction.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_sigmatel.c
sound/soc/adi/axi-i2s.c
sound/soc/codecs/pcm512x.c
sound/soc/codecs/rt286.c
sound/soc/codecs/rt5677.c
sound/soc/codecs/ts3a227e.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8960.c
sound/soc/dwc/designware_i2s.c
sound/soc/fsl/fsl_esai.h
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/imx-wm8962.c
sound/soc/generic/simple-card.c
sound/soc/intel/Kconfig
sound/soc/intel/bytcr_dpcm_rt5640.c
sound/soc/intel/sst-firmware.c
sound/soc/intel/sst-haswell-ipc.c
sound/soc/intel/sst/sst_acpi.c
sound/soc/omap/omap-mcbsp.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/rockchip/rockchip_i2s.h
sound/soc/soc-compress.c
sound/soc/soc-core.c
sound/usb/caiaq/audio.c
sound/usb/mixer.c
tools/include/asm-generic/bitops.h
tools/include/asm-generic/bitops/arch_hweight.h [new file with mode: 0644]
tools/include/asm-generic/bitops/const_hweight.h [new file with mode: 0644]
tools/include/asm-generic/bitops/hweight.h [new file with mode: 0644]
tools/include/linux/bitops.h
tools/lib/api/fs/debugfs.c
tools/lib/api/fs/fs.c
tools/lib/lockdep/preload.c
tools/perf/MANIFEST
tools/perf/Makefile.perf
tools/perf/arch/powerpc/util/skip-callchain-idx.c
tools/perf/bench/sched-pipe.c
tools/perf/builtin-annotate.c
tools/perf/builtin-diff.c
tools/perf/builtin-list.c
tools/perf/builtin-report.c
tools/perf/builtin-top.c
tools/perf/config/Makefile
tools/perf/config/Makefile.arch
tools/perf/perf-sys.h
tools/perf/scripts/perl/Perf-Trace-Util/Context.c
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/hists_cumulate.c
tools/perf/tests/hists_filter.c
tools/perf/tests/hists_output.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/hist.c
tools/perf/ui/tui/setup.c
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/cache.h
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/evlist.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/hweight.c [deleted file]
tools/perf/util/include/asm/hweight.h [deleted file]
tools/perf/util/machine.c
tools/perf/util/map.h
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/python-ext-sources
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/unwind-libunwind.c
tools/testing/selftests/exec/execveat.c
tools/testing/selftests/mqueue/mq_perf_tests.c
tools/testing/selftests/vm/Makefile

index ada8ad696b2e902489c6e8a8f713f1285bf5a9c5..d357e1bd2a434665ae545d9ed970edd77f15f7d9 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
+Henrik Rydberg <rydberg@bitmath.org>
 Herbert Xu <herbert@gondor.apana.org.au>
 Jacob Shin <Jacob.Shin@amd.com>
 James Bottomley <jejb@mulgrave.(none)>
index 0ec8b8178c41305a4435b208646715c22b94154a..80d9888a8ece2673686ead1cda4504ce568a1051 100644 (file)
@@ -14,3 +14,18 @@ Description:
                The /sys/class/mei/meiN directory is created for
                each probed mei device
 
+What:          /sys/class/mei/meiN/fw_status
+Date:          Nov 2014
+KernelVersion: 3.19
+Contact:       Tomas Winkler <tomas.winkler@intel.com>
+Description:   Display fw status registers content
+
+               The ME FW writes its status information into fw status
+               registers for BIOS and OS to monitor fw health.
+
+               The register contains running state, power management
+               state, error codes, and others. The way the registers
+               are decoded depends on PCH or SoC generation.
+               Also number of registers varies between 1 and 6
+               depending on generation.
+
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop
deleted file mode 100644 (file)
index 7969443..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-What:          /sys/class/leds/dell::kbd_backlight/als_setting
-Date:          December 2014
-KernelVersion: 3.19
-Contact:       Gabriele Mazzotta <gabriele.mzt@gmail.com>,
-               Pali Rohár <pali.rohar@gmail.com>
-Description:
-               This file allows to control the automatic keyboard
-               illumination mode on some systems that have an ambient
-               light sensor. Write 1 to this file to enable the auto
-               mode, 0 to disable it.
-
-What:          /sys/class/leds/dell::kbd_backlight/start_triggers
-Date:          December 2014
-KernelVersion: 3.19
-Contact:       Gabriele Mazzotta <gabriele.mzt@gmail.com>,
-               Pali Rohár <pali.rohar@gmail.com>
-Description:
-               This file allows to control the input triggers that
-               turn on the keyboard backlight illumination that is
-               disabled because of inactivity.
-               Read the file to see the triggers available. The ones
-               enabled are preceded by '+', those disabled by '-'.
-
-               To enable a trigger, write its name preceded by '+' to
-               this file. To disable a trigger, write its name preceded
-               by '-' instead.
-
-               For example, to enable the keyboard as trigger run:
-                   echo +keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
-               To disable it:
-                   echo -keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
-
-               Note that not all the available triggers can be configured.
-
-What:          /sys/class/leds/dell::kbd_backlight/stop_timeout
-Date:          December 2014
-KernelVersion: 3.19
-Contact:       Gabriele Mazzotta <gabriele.mzt@gmail.com>,
-               Pali Rohár <pali.rohar@gmail.com>
-Description:
-               This file allows to specify the interval after which the
-               keyboard illumination is disabled because of inactivity.
-               The timeouts are expressed in seconds, minutes, hours and
-               days, for which the symbols are 's', 'm', 'h' and 'd'
-               respectively.
-
-               To configure the timeout, write to this file a value along
-               with any the above units. If no unit is specified, the value
-               is assumed to be expressed in seconds.
-
-               For example, to set the timeout to 10 minutes run:
-                   echo 10m > /sys/class/leds/dell::kbd_backlight/stop_timeout
-
-               Note that when this file is read, the returned value might be
-               expressed in a different unit than the one used when the timeout
-               was set.
-
-               Also note that only some timeouts are supported and that
-               some systems might fall back to a specific timeout in case
-               an invalid timeout is written to this file.
index 49b8b8907f36e0b9797f47e5a7de0f00e0c90db4..aac9357d4866bda01fcf2a0e532a5068b5d4bfbf 100644 (file)
       <section id="ps-client">
         <title>support for powersaving clients</title>
 !Pinclude/net/mac80211.h AP support for powersaving clients
-      </section>
 !Finclude/net/mac80211.h ieee80211_get_buffered_bc
 !Finclude/net/mac80211.h ieee80211_beacon_get
 !Finclude/net/mac80211.h ieee80211_sta_eosp
 !Finclude/net/mac80211.h ieee80211_sta_ps_transition_ni
 !Finclude/net/mac80211.h ieee80211_sta_set_buffered
 !Finclude/net/mac80211.h ieee80211_sta_block_awake
+      </section>
       </chapter>
 
       <chapter id="multi-iface">
           <title>RX A-MPDU aggregation</title>
 !Pnet/mac80211/agg-rx.c RX A-MPDU aggregation
 !Cnet/mac80211/agg-rx.c
-        </sect1>
 !Finclude/net/mac80211.h ieee80211_ampdu_mlme_action
+        </sect1>
       </chapter>
 
       <chapter id="smps">
index 556c8665fdbf0aa5b89e4a0818b66e30b702f3e9..b78564b2b2019e06a4fea1863191d2cab6303ee2 100644 (file)
@@ -23,7 +23,7 @@ Required nodes:
     range of 0x200 bytes.
 
 - syscon: the root node of the Integrator platforms must have a
-  system controller node pointong to the control registers,
+  system controller node pointing to the control registers,
   with the compatible string
   "arm,integrator-ap-syscon"
   "arm,integrator-cp-syscon"
diff --git a/Documentation/devicetree/bindings/arm/fw-cfg.txt b/Documentation/devicetree/bindings/arm/fw-cfg.txt
new file mode 100644 (file)
index 0000000..953fb64
--- /dev/null
@@ -0,0 +1,72 @@
+* QEMU Firmware Configuration bindings for ARM
+
+QEMU's arm-softmmu and aarch64-softmmu emulation / virtualization targets
+provide the following Firmware Configuration interface on the "virt" machine
+type:
+
+- A write-only, 16-bit wide selector (or control) register,
+- a read-write, 64-bit wide data register.
+
+QEMU exposes the control and data register to ARM guests as memory mapped
+registers; their location is communicated to the guest's UEFI firmware in the
+DTB that QEMU places at the bottom of the guest's DRAM.
+
+The guest writes a selector value (a key) to the selector register, and then
+can read the corresponding data (produced by QEMU) via the data register. If
+the selected entry is writable, the guest can rewrite it through the data
+register.
+
+The selector register takes keys in big endian byte order.
+
+The data register allows accesses with 8, 16, 32 and 64-bit width (only at
+offset 0 of the register). Accesses larger than a byte are interpreted as
+arrays, bundled together only for better performance. The bytes constituting
+such a word, in increasing address order, correspond to the bytes that would
+have been transferred by byte-wide accesses in chronological order.
+
+The interface allows guest firmware to download various parameters and blobs
+that affect how the firmware works and what tables it installs for the guest
+OS. For example, boot order of devices, ACPI tables, SMBIOS tables, kernel and
+initrd images for direct kernel booting, virtual machine UUID, SMP information,
+virtual NUMA topology, and so on.
+
+The authoritative registry of the valid selector values and their meanings is
+the QEMU source code; the structure of the data blobs corresponding to the
+individual key values is also defined in the QEMU source code.
+
+The presence of the registers can be verified by selecting the "signature" blob
+with key 0x0000, and reading four bytes from the data register. The returned
+signature is "QEMU".
+
+The outermost protocol (involving the write / read sequences of the control and
+data registers) is expected to be versioned, and/or described by feature bits.
+The interface revision / feature bitmap can be retrieved with key 0x0001. The
+blob to be read from the data register has size 4, and it is to be interpreted
+as a uint32_t value in little endian byte order. The current value
+(corresponding to the above outer protocol) is zero.
+
+The guest kernel is not expected to use these registers (although it is
+certainly allowed to); the device tree bindings are documented here because
+this is where device tree bindings reside in general.
+
+Required properties:
+
+- compatible: "qemu,fw-cfg-mmio".
+
+- reg: the MMIO region used by the device.
+  * Bytes 0x0 to 0x7 cover the data register.
+  * Bytes 0x8 to 0x9 cover the selector register.
+  * Further registers may be appended to the region in case of future interface
+    revisions / feature bits.
+
+Example:
+
+/ {
+       #size-cells = <0x2>;
+       #address-cells = <0x2>;
+
+       fw-cfg@9020000 {
+               compatible = "qemu,fw-cfg-mmio";
+               reg = <0x0 0x9020000 0x0 0xa>;
+       };
+};
index 1a69c078adf2bbf94f2714f35b43cfec1fc72fc7..fcb1c6a4787b49ba9b76b04126989c9c37a43a9a 100644 (file)
@@ -19,7 +19,7 @@ type of the connections, they just map their existence. Specific properties
 may be described by specialized bindings depending on the type of connection.
 
 To see how this binding applies to video pipelines, for example, see
-Documentation/device-tree/bindings/media/video-interfaces.txt.
+Documentation/devicetree/bindings/media/video-interfaces.txt.
 Here the ports describe data interfaces, and the links between them are
 the connecting data buses. A single port with multiple connections can
 correspond to multiple devices being connected to the same physical bus.
index 437e0db3823cac05ec71702827159851d0806d03..4c26fda3844a7f06c05cdef710d7e08fef2b76b8 100644 (file)
@@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
        compatible      = "st,comms-ssc4-i2c";
        reg             = <0xfed40000 0x110>;
        interrupts      =  <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-       clocks          = <&CLK_S_ICN_REG_0>;
+       clocks          = <&clk_s_a0_ls CLK_ICN_REG>;
        clock-names     = "ssc";
        clock-frequency = <400000>;
        pinctrl-names   = "default";
index 9f4e3824e71eb22bb825cfcd0e2c6d128242f6af..9f41d05be3be8676e307f37f00accdab12722bff 100644 (file)
@@ -47,6 +47,7 @@ dallas,ds3232         Extremely Accurate I²C RTC with Integrated Crystal and SRAM
 dallas,ds4510          CPU Supervisor with Nonvolatile Memory and Programmable I/O
 dallas,ds75            Digital Thermometer and Thermostat
 dlg,da9053             DA9053: flexible system level PMIC with multicore support
+dlg,da9063             DA9063: system PMIC for quad-core application processors
 epson,rx8025           High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581           I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 fsl,mag3110            MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
index a4a38fcf2ed61d1fa3db42e053fb8259f2ffcc71..44b705767aca45ea1366c6e624f0515c8cb78b99 100644 (file)
@@ -10,12 +10,13 @@ Optional properties:
 Each button (key) is represented as a sub-node of "gpio-keys":
 Subnode properties:
 
+       - gpios: OF device-tree gpio specification.
+       - interrupts: the interrupt line for that input.
        - label: Descriptive name of the key.
        - linux,code: Keycode to emit.
 
-Required mutual exclusive subnode-properties:
-       - gpios: OF device-tree gpio specification.
-       - interrupts: the interrupt line for that input
+Note that either "interrupts" or "gpios" properties can be omitted, but not
+both at the same time. Specifying both properties is allowed.
 
 Optional subnode-properties:
        - linux,input-type: Specify event type this button/key generates.
@@ -23,6 +24,9 @@ Optional subnode-properties:
        - debounce-interval: Debouncing interval time in milliseconds.
          If not specified defaults to 5.
        - gpio-key,wakeup: Boolean, button can wake-up the system.
+       - linux,can-disable: Boolean, indicates that button is connected
+         to dedicated (not shared) interrupt which can be disabled to
+         suppress events from the button.
 
 Example nodes:
 
index 1b97222e8a0bfe30d88f9d921195b721ecd65194..12bb771d66d446647722ba3e423aabc3734f77bc 100644 (file)
@@ -8,6 +8,8 @@ Optional properties:
  - debounce-interval        : Debouncing interval time in milliseconds
  - st,scan-count            : Scanning cycles elapsed before key data is updated
  - st,no-autorepeat         : If specified device will not autorepeat
+ - keypad,num-rows          : See ./matrix-keymap.txt
+ - keypad,num-columns       : See ./matrix-keymap.txt
 
 Example:
 
index 42409bfe04c4b9b25ac6743d8f30d00a7403461b..33df3932168e1b8941a5de1565b1cf02f96fb551 100644 (file)
@@ -7,17 +7,38 @@ Required properties:
    - SerDes Rx/Tx registers
    - SerDes integration registers (1/2)
    - SerDes integration registers (2/2)
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the amd-xgbe-phy interrupt.
 
 Optional properties:
 - amd,speed-set: Speed capabilities of the device
     0 - 1GbE and 10GbE (default)
     1 - 2.5GbE and 10GbE
 
+The following optional properties are represented by an array with each
+value corresponding to a particular speed. The first array value represents
+the setting for the 1GbE speed, the second value for the 2.5GbE speed and
+the third value for the 10GbE speed.  All three values are required if the
+property is used.
+- amd,serdes-blwc: Baseline wandering correction enablement
+    0 - Off
+    1 - On
+- amd,serdes-cdr-rate: CDR rate speed selection
+- amd,serdes-pq-skew: PQ (data sampling) skew
+- amd,serdes-tx-amp: TX amplitude boost
+
 Example:
        xgbe_phy@e1240800 {
                compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
                reg = <0 0xe1240800 0 0x00400>,
                      <0 0xe1250000 0 0x00060>,
                      <0 0xe1250080 0 0x00004>;
+               interrupt-parent = <&gic>;
+               interrupts = <0 323 4>;
                amd,speed-set = <0>;
+               amd,serdes-blwc = <1>, <1>, <0>;
+               amd,serdes-cdr-rate = <2>, <2>, <7>;
+               amd,serdes-pq-skew = <10>, <10>, <30>;
+               amd,serdes-tx-amp = <15>, <15>, <10>;
        };
index 28767ed7c1bdcf44b43be8fdd50182706fd2a5f1..5224bf05f6f8b36e5c78cabd7b83ec60c1b9e849 100644 (file)
@@ -11,6 +11,8 @@ Required properties:
 Optional properties:
 - davicom,no-eeprom : Configuration EEPROM is not available
 - davicom,ext-phy : Use external PHY
+- reset-gpios : phandle of gpio that will be used to reset chip during probe
+- vcc-supply : phandle of regulator that will be used to enable power to chip
 
 Example:
 
@@ -21,4 +23,6 @@ Example:
                interrupts = <7 4>;
                local-mac-address = [00 00 de ad be ef];
                davicom,no-eeprom;
+               reset-gpios = <&gpf 12 GPIO_ACTIVE_LOW>;
+               vcc-supply = <&eth0_power>;
        };
index 032808843f90a3b1db0389b0a60dd075c9ea3ac6..24c5cdaba8d279a4b132fbd2f964ae1460b3fd0f 100644 (file)
@@ -4,7 +4,8 @@ This file provides information, what the device node
 for the davinci_emac interface contains.
 
 Required properties:
-- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
+- compatible: "ti,davinci-dm6467-emac", "ti,am3517-emac" or
+  "ti,dm816-emac"
 - reg: Offset and length of the register set for the device
 - ti,davinci-ctrl-reg-offset: offset to control register
 - ti,davinci-ctrl-mod-reg-offset: offset to control module register
index be6ea8960f208c72b7d69e0286c56712661ccb2c..1e97532a0b792603477ce3e63c5a415c025dc8b0 100644 (file)
@@ -8,7 +8,16 @@ of how to define a PHY.
 Required properties:
   - reg : Offset and length of the register set for the device
   - compatible : Should define the compatible device type for the
-    mdio.  Currently, this is most likely to be "fsl,gianfar-mdio"
+    mdio. Currently supported strings/devices are:
+       - "fsl,gianfar-tbi"
+       - "fsl,gianfar-mdio"
+       - "fsl,etsec2-tbi"
+       - "fsl,etsec2-mdio"
+       - "fsl,ucc-mdio"
+       - "fsl,fman-mdio"
+    When device_type is "mdio", the following strings are also considered:
+       - "gianfar"
+       - "ucc_geth_phy"
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt b/Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
new file mode 100644 (file)
index 0000000..988fc69
--- /dev/null
@@ -0,0 +1,88 @@
+Hisilicon hip04 Ethernet Controller
+
+* Ethernet controller node
+
+Required properties:
+- compatible: should be "hisilicon,hip04-mac".
+- reg: address and length of the register set for the device.
+- interrupts: interrupt for the device.
+- port-handle: <phandle port channel>
+       phandle, specifies a reference to the syscon ppe node
+       port, port number connected to the controller
+       channel, recv channel start from channel * number (RX_DESC_NUM)
+- phy-mode: see ethernet.txt [1].
+
+Optional properties:
+- phy-handle: see ethernet.txt [1].
+
+[1] Documentation/devicetree/bindings/net/ethernet.txt
+
+
+* Ethernet ppe node:
+Control rx & tx fifos of all ethernet controllers.
+Have 2048 recv channels shared by all ethernet controllers, only if no overlap.
+Each controller's recv channel start from channel * number (RX_DESC_NUM).
+
+Required properties:
+- compatible: "hisilicon,hip04-ppe", "syscon".
+- reg: address and length of the register set for the device.
+
+
+* MDIO bus node:
+
+Required properties:
+
+- compatible: should be "hisilicon,hip04-mdio".
+- Inherits from MDIO bus node binding [2]
+[2] Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+       mdio {
+               compatible = "hisilicon,hip04-mdio";
+               reg = <0x28f1000 0x1000>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               phy0: ethernet-phy@0 {
+                       compatible = "ethernet-phy-ieee802.3-c22";
+                       reg = <0>;
+                       marvell,reg-init = <18 0x14 0 0x8001>;
+               };
+
+               phy1: ethernet-phy@1 {
+                       compatible = "ethernet-phy-ieee802.3-c22";
+                       reg = <1>;
+                       marvell,reg-init = <18 0x14 0 0x8001>;
+               };
+       };
+
+       ppe: ppe@28c0000 {
+               compatible = "hisilicon,hip04-ppe", "syscon";
+               reg = <0x28c0000 0x10000>;
+       };
+
+       fe: ethernet@28b0000 {
+               compatible = "hisilicon,hip04-mac";
+               reg = <0x28b0000 0x10000>;
+               interrupts = <0 413 4>;
+               phy-mode = "mii";
+               port-handle = <&ppe 31 0>;
+       };
+
+       ge0: ethernet@2800000 {
+               compatible = "hisilicon,hip04-mac";
+               reg = <0x2800000 0x10000>;
+               interrupts = <0 402 4>;
+               phy-mode = "sgmii";
+               port-handle = <&ppe 0 1>;
+               phy-handle = <&phy0>;
+       };
+
+       ge8: ethernet@2880000 {
+               compatible = "hisilicon,hip04-mac";
+               reg = <0x2880000 0x10000>;
+               interrupts = <0 410 4>;
+               phy-mode = "sgmii";
+               port-handle = <&ppe 8 2>;
+               phy-handle = <&phy1>;
+       };
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
new file mode 100644 (file)
index 0000000..f9c0771
--- /dev/null
@@ -0,0 +1,197 @@
+This document describes the device tree bindings associated with the
+keystone network coprocessor(NetCP) driver support.
+
+The network coprocessor (NetCP) is a hardware accelerator that processes
+Ethernet packets. NetCP has a gigabit Ethernet (GbE) subsytem with a ethernet
+switch sub-module to send and receive packets. NetCP also includes a packet
+accelerator (PA) module to perform packet classification operations such as
+header matching, and packet modification operations such as checksum
+generation. NetCP can also optionally include a Security Accelerator (SA)
+capable of performing IPSec operations on ingress/egress packets.
+
+Keystone II SoC's also have a 10 Gigabit Ethernet Subsystem (XGbE) which
+includes a 3-port Ethernet switch sub-module capable of 10Gb/s and 1Gb/s rates
+per Ethernet port.
+
+Keystone NetCP driver has a plug-in module architecture where each of the NetCP
+sub-modules exist as a loadable kernel module which plug in to the netcp core.
+These sub-modules are represented as "netcp-devices" in the dts bindings. It is
+mandatory to have the ethernet switch sub-module for the ethernet interface to
+be operational. Any other sub-module like the PA is optional.
+
+NetCP Ethernet SubSystem Layout:
+
+-----------------------------
+  NetCP subsystem(10G or 1G)
+-----------------------------
+       |
+       |-> NetCP Devices ->    |
+       |                       |-> GBE/XGBE Switch
+       |                       |
+       |                       |-> Packet Accelerator
+       |                       |
+       |                       |-> Security Accelerator
+       |
+       |
+       |
+       |-> NetCP Interfaces -> |
+                               |-> Ethernet Port 0
+                               |
+                               |-> Ethernet Port 1
+                               |
+                               |-> Ethernet Port 2
+                               |
+                               |-> Ethernet Port 3
+
+
+NetCP subsystem properties:
+Required properties:
+- compatible:  Should be "ti,netcp-1.0"
+- clocks:      phandle to the reference clocks for the subsystem.
+- dma-id:      Navigator packet dma instance id.
+
+Optional properties:
+- reg:         register location and the size for the following register
+               regions in the specified order.
+               - Efuse MAC address register
+- dma-coherent:        Present if dma operations are coherent
+- big-endian:  Keystone devices can be operated in a mode where the DSP is in
+               the big endian mode. In such cases enable this option. This
+               option should also be enabled if the ARM is operated in
+               big endian mode with the DSP in little endian.
+
+NetCP device properties: Device specification for NetCP sub-modules.
+1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications.
+Required properties:
+- label:       Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb.
+- reg:         register location and the size for the following register
+               regions in the specified order.
+               - subsystem registers
+               - serdes registers
+- tx-channel:  the navigator packet dma channel name for tx.
+- tx-queue:    the navigator queue number associated with the tx dma channel.
+- interfaces:  specification for each of the switch port to be registered as a
+               network interface in the stack.
+-- slave-port: Switch port number, 0 based numbering.
+-- link-interface:     type of link interface, supported options are
+                       - mac<->mac auto negotiate mode: 0
+                       - mac<->phy mode: 1
+                       - mac<->mac forced mode: 2
+                       - mac<->fiber mode: 3
+                       - mac<->phy mode with no mdio: 4
+                       - 10Gb mac<->phy mode : 10
+                       - 10Gb mac<->mac forced mode : 11
+----phy-handle:        phandle to PHY device
+
+Optional properties:
+- enable-ale:  NetCP driver keeps the address learning feature in the ethernet
+               switch module disabled. This attribute is to enable the address
+               learning.
+- secondary-slave-ports:       specification for each of the switch port not be
+                               registered as a network interface. NetCP driver
+                               will only initialize these ports and attach PHY
+                               driver to them if needed.
+
+NetCP interface properties: Interface specification for NetCP sub-modules.
+Required properties:
+- rx-channel:  the navigator packet dma channel name for rx.
+- rx-queue:    the navigator queue number associated with rx dma channel.
+- rx-pool:     specifies the number of descriptors to be used & the region-id
+               for creating the rx descriptor pool.
+- tx-pool:     specifies the number of descriptors to be used & the region-id
+               for creating the tx descriptor pool.
+- rx-queue-depth:      number of descriptors in each of the free descriptor
+                       queue (FDQ) for the pktdma Rx flow. There can be at
+                       present a maximum of 4 queues per Rx flow.
+- rx-buffer-size:      the buffer size for each of the Rx flow FDQ.
+- tx-completion-queue: the navigator queue number where the descriptors are
+                       recycled after Tx DMA completion.
+
+Optional properties:
+- efuse-mac:   If this is 1, then the MAC address for the interface is
+               obtained from the device efuse mac address register
+- local-mac-address:   the driver is designed to use the of_get_mac_address api
+                       only if efuse-mac is 0. When efuse-mac is 0, the MAC
+                       address is obtained from local-mac-address. If this
+                       attribute is not present, then the driver will use a
+                       random MAC address.
+- "netcp-device label":        phandle to the device specification for each of NetCP
+                       sub-module attached to this interface.
+
+Example binding:
+
+netcp: netcp@2090000 {
+       reg = <0x2620110 0x8>;
+       reg-names = "efuse";
+       compatible = "ti,netcp-1.0";
+       #address-cells = <1>;
+       #size-cells = <1>;
+       ranges;
+
+       clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>;
+       dma-coherent;
+       /* big-endian; */
+       dma-id = <0>;
+
+       netcp-devices {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+               gbe@0x2090000 {
+                       label = "netcp-gbe";
+                       reg = <0x2090000 0xf00>;
+                       /* enable-ale; */
+                       tx-queue = <648>;
+                       tx-channel = <8>;
+
+                       interfaces {
+                               gbe0: interface-0 {
+                                       slave-port = <0>;
+                                       link-interface  = <4>;
+                               };
+                               gbe1: interface-1 {
+                                       slave-port = <1>;
+                                       link-interface  = <4>;
+                               };
+                       };
+
+                       secondary-slave-ports {
+                               port-2 {
+                                       slave-port = <2>;
+                                       link-interface  = <2>;
+                               };
+                               port-3 {
+                                       slave-port = <3>;
+                                       link-interface  = <2>;
+                               };
+                       };
+               };
+       };
+
+       netcp-interfaces {
+               interface-0 {
+                       rx-channel = <22>;
+                       rx-pool = <1024 12>;
+                       tx-pool = <1024 12>;
+                       rx-queue-depth = <128 128 0 0>;
+                       rx-buffer-size = <1518 4096 0 0>;
+                       rx-queue = <8704>;
+                       tx-completion-queue = <8706>;
+                       efuse-mac = <1>;
+                       netcp-gbe = <&gbe0>;
+
+               };
+               interface-1 {
+                       rx-channel = <23>;
+                       rx-pool = <1024 12>;
+                       tx-pool = <1024 12>;
+                       rx-queue-depth = <128 128 0 0>;
+                       rx-buffer-size = <1518 4096 0 0>;
+                       rx-queue = <8705>;
+                       tx-completion-queue = <8707>;
+                       efuse-mac = <0>;
+                       local-mac-address = [02 18 31 7e 3e 6f];
+                       netcp-gbe = <&gbe1>;
+               };
+       };
+};
index e4faa2e8dfebe3cb3b163944ed0968be82c46ba1..7bb2e213d6f924ba7c3a908f726ec6b83b2c211d 100644 (file)
@@ -1,7 +1,7 @@
 * STMicroelectronics SAS. ST21NFCA NFC Controller
 
 Required properties:
-- compatible: Should be "st,st21nfca_i2c".
+- compatible: Should be "st,st21nfca-i2c".
 - clock-frequency: I²C work frequency.
 - reg: address on the bus
 - interrupt-parent: phandle for the interrupt gpio controller
@@ -11,6 +11,10 @@ Required properties:
 Optional SoC Specific Properties:
 - pinctrl-names: Contains only one value - "default".
 - pintctrl-0: Specifies the pin control groups used for this controller.
+- ese-present: Specifies that an ese is physically connected to the nfc
+controller.
+- uicc-present: Specifies that the uicc swp signal can be physically
+connected to the nfc controller.
 
 Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
 
@@ -20,7 +24,7 @@ Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
 
        st21nfca: st21nfca@1 {
 
-               compatible = "st,st21nfca_i2c";
+               compatible = "st,st21nfca-i2c";
 
                reg = <0x01>;
                clock-frequency = <400000>;
@@ -29,5 +33,8 @@ Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
 
                enable-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+
+               ese-present;
+               uicc-present;
        };
 };
index 9005608cbbd1e7fe1683b15ce8fcc6bbd7b43586..bb237072dbe96acb8fdbb86997c0923540b0fde5 100644 (file)
@@ -1,7 +1,7 @@
 * STMicroelectronics SAS. ST21NFCB NFC Controller
 
 Required properties:
-- compatible: Should be "st,st21nfcb_i2c".
+- compatible: Should be "st,st21nfcb-i2c".
 - clock-frequency: I²C work frequency.
 - reg: address on the bus
 - interrupt-parent: phandle for the interrupt gpio controller
@@ -20,7 +20,7 @@ Example (for ARM-based BeagleBoard xM with ST21NFCB on I2C2):
 
        st21nfcb: st21nfcb@8 {
 
-               compatible = "st,st21nfcb_i2c";
+               compatible = "st,st21nfcb-i2c";
 
                reg = <0x08>;
                clock-frequency = <400000>;
index 2362dcd5afc9d39782cb4d4bc51c77997a2a7f35..21fd199e89b5c34ed9d4f7a361b86d5c155fb34b 100644 (file)
@@ -33,6 +33,7 @@ Required properties:
 Optional properties:
  - tx_delay: Delay value for TXD timing. Range value is 0~0x7F, 0x30 as default.
  - rx_delay: Delay value for RXD timing. Range value is 0~0x7F, 0x10 as default.
+ - phy-supply: phandle to a regulator if the PHY needs one
 
 Example:
 
index 6762a6b5da7e91f7cf215a62c026885365b015f9..d05c1e1fd9b6f05a763efbeb18179d43ac35a50c 100644 (file)
@@ -9,14 +9,10 @@ The device node has following properties.
 Required properties:
  - compatible  : Can be "st,stih415-dwmac", "st,stih416-dwmac",
    "st,stih407-dwmac", "st,stid127-dwmac".
- - reg : Offset of the glue configuration register map in system
-   configuration regmap pointed by st,syscon property and size.
- - st,syscon : Should be phandle to system configuration node which
-   encompases this glue registers.
+ - st,syscon : Should be phandle/offset pair. The phandle to the syscon node which
+   encompases the glue register, and the offset of the control register.
  - st,gmac_en: this is to enable the gmac into a dedicated sysctl control
    register available on STiH407 SoC.
- - sti-ethconf: this is the gmac glue logic register to enable the GMAC,
-   select among the different modes and program the clk retiming.
  - pinctrl-0: pin-control for all the MII mode supported.
 
 Optional properties:
@@ -40,10 +36,10 @@ ethernet0: dwmac@9630000 {
        device_type = "network";
        status = "disabled";
        compatible = "st,stih407-dwmac", "snps,dwmac", "snps,dwmac-3.710";
-       reg = <0x9630000 0x8000>, <0x80 0x4>;
-       reg-names = "stmmaceth", "sti-ethconf";
+       reg = <0x9630000 0x8000>;
+       reg-names = "stmmaceth";
 
-       st,syscon = <&syscfg_sbc_reg>;
+       st,syscon = <&syscfg_sbc_reg 0x80>;
        st,gmac_en;
        resets = <&softreset STIH407_ETH1_SOFTRESET>;
        reset-names = "stmmaceth";
index c41afd963edf508f36c57f125d092efc282c0429..8ca65cec52ae8cc25e8231891a159c9525297391 100644 (file)
@@ -43,6 +43,7 @@ Optional properties:
   available this clock is used for programming the Timestamp Addend Register.
   If not passed then the system clock will be used and this is fine on some
   platforms.
+- snps,burst_len: The AXI burst lenth value of the AXI BUS MODE register.
 
 Examples:
 
index 42c880886cf74e2937156ce8fb3e0b93067e6ecc..9802d5d911aa677d656bda23e4b4f4cafd89ea84 100644 (file)
@@ -6,8 +6,10 @@ for SATA and PCIe.
 
 Required properties (controller (parent) node):
 - compatible    : Should be "st,miphy365x-phy"
-- st,syscfg     : Should be a phandle of the system configuration register group
-                 which contain the SATA, PCIe mode setting bits
+- st,syscfg     : Phandle / integer array property. Phandle of sysconfig group
+                 containing the miphy registers and integer array should contain
+                 an entry for each port sub-node, specifying the control
+                 register offset inside the sysconfig group.
 
 Required nodes :  A sub-node is required for each channel the controller
                   provides. Address range information including the usual
@@ -26,7 +28,6 @@ Required properties (port (child) node):
                  registers filled in "reg":
                        - sata:   For SATA devices
                        - pcie:   For PCIe devices
-                       - syscfg: To specify the syscfg based config register
 
 Optional properties (port (child) node):
 - st,sata-gen       :  Generation of locally attached SATA IP. Expected values
@@ -39,20 +40,20 @@ Example:
 
        miphy365x_phy: miphy365x@fe382000 {
                compatible      = "st,miphy365x-phy";
-               st,syscfg       = <&syscfg_rear>;
+               st,syscfg       = <&syscfg_rear 0x824 0x828>;
                #address-cells  = <1>;
                #size-cells     = <1>;
                ranges;
 
                phy_port0: port@fe382000 {
-                       reg = <0xfe382000 0x100>, <0xfe394000 0x100>, <0x824 0x4>;
-                       reg-names = "sata", "pcie", "syscfg";
+                       reg = <0xfe382000 0x100>, <0xfe394000 0x100>;
+                       reg-names = "sata", "pcie";
                        #phy-cells = <1>;
                        st,sata-gen = <3>;
                };
 
                phy_port1: port@fe38a000 {
-                       reg = <0xfe38a000 0x100>, <0xfe804000 0x100>, <0x828 0x4>;;
+                       reg = <0xfe38a000 0x100>, <0xfe804000 0x100>;;
                        reg-names = "sata", "pcie", "syscfg";
                        #phy-cells = <1>;
                        st,pcie-tx-pol-inv;
index 1ef8228db73b605db61a4e654460609bcb3bc7b5..de6a706abcdbe900fda70080f76bc9214c1400f1 100644 (file)
@@ -5,10 +5,7 @@ host controllers (when controlling usb2/1.1 devices) available on STiH407 SoC fa
 
 Required properties:
 - compatible           : should be "st,stih407-usb2-phy"
-- reg                  : contain the offset and length of the system configuration registers
-                         used as glue logic to control & parameter phy
-- reg-names            : the names of the system configuration registers in "reg", should be "param" and "reg"
-- st,syscfg            : sysconfig register to manage phy parameter at driver level
+- st,syscfg            : phandle of sysconfig bank plus integer array containing phyparam and phyctrl register offsets
 - resets               : list of phandle and reset specifier pairs. There should be two entries, one
                          for the whole phy and one for the port
 - reset-names          : list of reset signal names. Should be "global" and "port"
@@ -19,11 +16,8 @@ Example:
 
 usb2_picophy0: usbpicophy@f8 {
        compatible      = "st,stih407-usb2-phy";
-       reg             = <0xf8 0x04>,  /* syscfg 5062 */
-                         <0xf4 0x04>;  /* syscfg 5061 */
-       reg-names       = "param", "ctrl";
        #phy-cells      = <0>;
-       st,syscfg       = <&syscfg_core>;
+       st,syscfg       = <&syscfg_core 0x100 0xf4>;
        resets          = <&softreset STIH407_PICOPHY_SOFTRESET>,
                          <&picophyreset STIH407_PICOPHY0_RESET>;
        reset-names     = "global", "port";
index b1df0ad1306c73e89a4d4be90ef4892b511b6cf3..d443279c95dca76539d60e3c55ce3f9f54d61478 100644 (file)
@@ -9,7 +9,6 @@ ad      Avionic Design GmbH
 adapteva       Adapteva, Inc.
 adi    Analog Devices, Inc.
 aeroflexgaisler        Aeroflex Gaisler AB
-ak     Asahi Kasei Corp.
 allwinner      Allwinner Technology Co., Ltd.
 altr   Altera Corp.
 amcc   Applied Micro Circuits Corporation (APM, formally AMCC)
@@ -20,6 +19,7 @@ amstaos       AMS-Taos Inc.
 apm    Applied Micro Circuits Corporation (APM)
 arm    ARM Ltd.
 armadeus       ARMadeus Systems SARL
+asahi-kasei    Asahi Kasei Corp.
 atmel  Atmel Corporation
 auo    AU Optronics Corporation
 avago  Avago Technologies
@@ -127,6 +127,7 @@ pixcir  PIXCIR MICROELECTRONICS Co., Ltd
 powervr        PowerVR (deprecated, use img)
 qca    Qualcomm Atheros, Inc.
 qcom   Qualcomm Technologies, Inc
+qemu   QEMU, a generic and open source machine emulator and virtualizer
 qnap   QNAP Systems, Inc.
 radxa  Radxa
 raidsonic      RaidSonic Technology GmbH
@@ -168,6 +169,7 @@ usi Universal Scientific Industrial Co., Ltd.
 v3     V3 Semiconductor
 variscite      Variscite Ltd.
 via    VIA Technologies, Inc.
+virtio Virtual I/O Device Specification, developed by the OASIS consortium
 voipac Voipac Technologies s.r.o.
 winbond Winbond Electronics corp.
 wlf    Wolfson Microelectronics
index cdb815e833b5b4a603373a4f4e681229f4467da9..2089c051f23fc370c5a7ea90b907a7a02f20d884 100644 (file)
@@ -1277,6 +1277,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        i8042.notimeout [HW] Ignore timeout condition signalled by controller
        i8042.reset     [HW] Reset the controller during init and cleanup
        i8042.unlock    [HW] Unlock (ignore) the keylock
+       i8042.kbdreset  [HW] Reset device connected to KBD port
 
        i810=           [HW,DRM]
 
index 58d08f8d8d8025dbe83481daa0fe76889216910e..9930ecfbb4658f7bd93969b52440e49eeb753de2 100644 (file)
@@ -279,8 +279,8 @@ Possible BPF extensions are shown in the following table:
   hatype                                skb->dev->type
   rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
-  vlan_tci                              vlan_tx_tag_get(skb)
-  vlan_pr                               vlan_tx_tag_present(skb)
+  vlan_tci                              skb_vlan_tag_get(skb)
+  vlan_pr                               skb_vlan_tag_present(skb)
   rand                                  prandom_u32()
 
 These extensions can also be prefixed with '#'.
index 9bffdfc648dc66149401296d73eb6fc04564ebd1..1b8c964b0d175c7d86d86896469650e5c59fa689 100644 (file)
@@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
        this when using large numbers of interfaces and/or routes.
+       From linux kernel 3.6 onwards, this is deprecated for ipv4
+       as route cache is no longer used.
 
 neigh/default/gc_thresh1 - INTEGER
        Minimum number of entries to keep.  Garbage collector will not
@@ -288,6 +290,28 @@ tcp_frto - INTEGER
 
        By default it's enabled with a non-zero value. 0 disables F-RTO.
 
+tcp_invalid_ratelimit - INTEGER
+       Limit the maximal rate for sending duplicate acknowledgments
+       in response to incoming TCP packets that are for an existing
+       connection but that are invalid due to any of these reasons:
+
+         (a) out-of-window sequence number,
+         (b) out-of-window acknowledgment number, or
+         (c) PAWS (Protection Against Wrapped Sequence numbers) check failure
+
+       This can help mitigate simple "ack loop" DoS attacks, wherein
+       a buggy or malicious middlebox or man-in-the-middle can
+       rewrite TCP header fields in manner that causes each endpoint
+       to think that the other is sending invalid TCP segments, thus
+       causing each side to send an unterminating stream of duplicate
+       acknowledgments for invalid segments.
+
+       Using 0 disables rate-limiting of dupacks in response to
+       invalid segments; otherwise this value specifies the minimal
+       space between sending such dupacks, in milliseconds.
+
+       Default: 500 (milliseconds).
+
 tcp_keepalive_time - INTEGER
        How often TCP sends out keepalive messages when keepalive is enabled.
        Default: 2hours.
@@ -1285,6 +1309,13 @@ accept_ra_rtr_pref - BOOLEAN
        Functional default: enabled if accept_ra is enabled.
                            disabled if accept_ra is disabled.
 
+accept_ra_mtu - BOOLEAN
+       Apply the MTU value specified in RA option 5 (RFC4861). If
+       disabled, the MTU specified in the RA will be ignored.
+
+       Functional default: enabled if accept_ra is enabled.
+                           disabled if accept_ra is disabled.
+
 accept_redirects - BOOLEAN
        Accept Redirects.
 
index c6af4bac5aa8f914a83305831e10f285c1699fb2..54f10478e8e30ccda77da9d345c01b9ace781e07 100644 (file)
@@ -199,16 +199,9 @@ frame header.
 TX limitations
 --------------
 
-Kernel processing usually involves validation of the message received by
-user-space, then processing its contents. The kernel must assure that
-userspace is not able to modify the message contents after they have been
-validated. In order to do so, the message is copied from the ring frame
-to an allocated buffer if either of these conditions is false:
-
-- only a single mapping of the ring exists
-- the file descriptor is not shared between processes
-
-This means that for threaded programs, the kernel will fall back to copying.
+As of Jan 2015 the message is always copied from the ring frame to an
+allocated buffer due to unresolved security concerns.
+See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX.").
 
 Example
 -------
index 70da5086153dbd24a9c9258e73cc16440d247519..f55599c62c9d61335005202146fdad75c8c133b9 100644 (file)
@@ -11,7 +11,8 @@ nf_conntrack_buckets - INTEGER (read-only)
        Size of hash table. If not specified as parameter during module
        loading, the default size is calculated by dividing total memory
        by 16384 to determine the number of buckets but the hash table will
-       never have fewer than 32 or more than 16384 buckets.
+       never have fewer than 32 and limited to 16384 buckets. For systems
+       with more than 4GB of memory it will be 65536 buckets.
 
 nf_conntrack_checksum - BOOLEAN
        0 - disabled
index 37c20ee2455e0e54fe749cc4a617ca4463ad553b..b3b9ac61d29d8751baff13446f800a2c9bc9e9fb 100644 (file)
@@ -131,6 +131,19 @@ performs best-effort detection of overlapping wildcarded flows and may reject
 some but not all of them. However, this behavior may change in future versions.
 
 
+Unique flow identifiers
+-----------------------
+
+An alternative to using the original match portion of a key as the handle for
+flow identification is a unique flow identifier, or "UFID". UFIDs are optional
+for both the kernel and user space program.
+
+User space programs that support UFID are expected to provide it during flow
+setup in addition to the flow, then refer to the flow using the UFID for all
+future operations. The kernel is not required to index flows by the original
+flow key if a UFID is specified.
+
+
 Basic rule for evolving flow keys
 ---------------------------------
 
index a5c784c893122788c8fd1d4db270576330b02518..5f0922613f1a8db7963fe3c44d21280d7290d84e 100644 (file)
@@ -162,6 +162,27 @@ SOF_TIMESTAMPING_OPT_CMSG:
   option IP_PKTINFO simultaneously.
 
 
+SOF_TIMESTAMPING_OPT_TSONLY:
+
+  Applies to transmit timestamps only. Makes the kernel return the
+  timestamp as a cmsg alongside an empty packet, as opposed to
+  alongside the original packet. This reduces the amount of memory
+  charged to the socket's receive budget (SO_RCVBUF) and delivers
+  the timestamp even if sysctl net.core.tstamp_allow_data is 0.
+  This option disables SOF_TIMESTAMPING_OPT_CMSG.
+
+
+New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to
+disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate
+regardless of the setting of sysctl net.core.tstamp_allow_data.
+
+An exception is when a process needs additional cmsg data, for
+instance SOL_IP/IP_PKTINFO to detect the egress network interface.
+Then pass option SOF_TIMESTAMPING_OPT_CMSG. This option depends on
+having access to the contents of the original packet, so cannot be
+combined with SOF_TIMESTAMPING_OPT_TSONLY.
+
+
 1.4 Bytestream Timestamps
 
 The SO_TIMESTAMPING interface supports timestamping of bytes in a
index 876f71c5625aba4686fe7aede6769ccf6f700110..8217510d3842b3a7e599a92c7f7502d2bbdcde86 100644 (file)
@@ -30,6 +30,8 @@
  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
+#define _GNU_SOURCE
+
 #include <arpa/inet.h>
 #include <asm/types.h>
 #include <error.h>
 #include <time.h>
 #include <unistd.h>
 
-/* ugly hack to work around netinet/in.h and linux/ipv6.h conflicts */
-#ifndef in6_pktinfo
-struct in6_pktinfo {
-       struct in6_addr ipi6_addr;
-       int             ipi6_ifindex;
-};
-#endif
-
 /* command line parameters */
 static int cfg_proto = SOCK_STREAM;
 static int cfg_ipproto = IPPROTO_TCP;
@@ -76,6 +70,7 @@ static int do_ipv6 = 1;
 static int cfg_payload_len = 10;
 static bool cfg_show_payload;
 static bool cfg_do_pktinfo;
+static bool cfg_loop_nodata;
 static uint16_t dest_port = 9000;
 
 static struct sockaddr_in daddr;
@@ -147,6 +142,9 @@ static void print_payload(char *data, int len)
 {
        int i;
 
+       if (!len)
+               return;
+
        if (len > 70)
                len = 70;
 
@@ -183,6 +181,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
        struct sock_extended_err *serr = NULL;
        struct scm_timestamping *tss = NULL;
        struct cmsghdr *cm;
+       int batch = 0;
 
        for (cm = CMSG_FIRSTHDR(msg);
             cm && cm->cmsg_len;
@@ -215,10 +214,18 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
                } else
                        fprintf(stderr, "unknown cmsg %d,%d\n",
                                        cm->cmsg_level, cm->cmsg_type);
+
+               if (serr && tss) {
+                       print_timestamp(tss, serr->ee_info, serr->ee_data,
+                                       payload_len);
+                       serr = NULL;
+                       tss = NULL;
+                       batch++;
+               }
        }
 
-       if (serr && tss)
-               print_timestamp(tss, serr->ee_info, serr->ee_data, payload_len);
+       if (batch > 1)
+               fprintf(stderr, "batched %d timestamps\n", batch);
 }
 
 static int recv_errmsg(int fd)
@@ -250,7 +257,7 @@ static int recv_errmsg(int fd)
        if (ret == -1 && errno != EAGAIN)
                error(1, errno, "recvmsg");
 
-       if (ret > 0) {
+       if (ret >= 0) {
                __recv_errmsg_cmsg(&msg, ret);
                if (cfg_show_payload)
                        print_payload(data, cfg_payload_len);
@@ -315,6 +322,9 @@ static void do_test(int family, unsigned int opt)
        opt |= SOF_TIMESTAMPING_SOFTWARE |
               SOF_TIMESTAMPING_OPT_CMSG |
               SOF_TIMESTAMPING_OPT_ID;
+       if (cfg_loop_nodata)
+               opt |= SOF_TIMESTAMPING_OPT_TSONLY;
+
        if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                       (char *) &opt, sizeof(opt)))
                error(1, 0, "setsockopt timestamping");
@@ -384,6 +394,7 @@ static void __attribute__((noreturn)) usage(const char *filepath)
                        "  -h:   show this message\n"
                        "  -I:   request PKTINFO\n"
                        "  -l N: send N bytes at a time\n"
+                       "  -n:   set no-payload option\n"
                        "  -r:   use raw\n"
                        "  -R:   use raw (IP_HDRINCL)\n"
                        "  -p N: connect to port N\n"
@@ -398,7 +409,7 @@ static void parse_opt(int argc, char **argv)
        int proto_count = 0;
        char c;
 
-       while ((c = getopt(argc, argv, "46hIl:p:rRux")) != -1) {
+       while ((c = getopt(argc, argv, "46hIl:np:rRux")) != -1) {
                switch (c) {
                case '4':
                        do_ipv6 = 0;
@@ -409,6 +420,9 @@ static void parse_opt(int argc, char **argv)
                case 'I':
                        cfg_do_pktinfo = true;
                        break;
+               case 'n':
+                       cfg_loop_nodata = true;
+                       break;
                case 'r':
                        proto_count++;
                        cfg_proto = SOCK_RAW;
index 666594b43cfff9f7a50678e3d74bd29c116d3648..6294b5186ae552b8b2fcb78ac4b8617ee6f8fe38 100644 (file)
@@ -97,6 +97,14 @@ rmem_max
 
 The maximum receive socket buffer size in bytes.
 
+tstamp_allow_data
+-----------------
+Allow processes to receive tx timestamps looped together with the original
+packet contents. If disabled, transmit timestamp requests from unprivileged
+processes are dropped unless socket option SOF_TIMESTAMPING_OPT_TSONLY is set.
+Default: 1 (on)
+
+
 wmem_default
 ------------
 
index 230ce71f4d75529ff4e071ed25e7bf0b4f72cd3c..2b47704f75cb3bfedf836cf02c75afd82c91e405 100755 (executable)
@@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .release_cmd                    = " + fabric_mod_name + "_release_cmd,\n"
        buf += "        .shutdown_session               = " + fabric_mod_name + "_shutdown_session,\n"
        buf += "        .close_session                  = " + fabric_mod_name + "_close_session,\n"
-       buf += "        .stop_session                   = " + fabric_mod_name + "_stop_session,\n"
-       buf += "        .fall_back_to_erl0              = " + fabric_mod_name + "_reset_nexus,\n"
-       buf += "        .sess_logged_in                 = " + fabric_mod_name + "_sess_logged_in,\n"
        buf += "        .sess_get_index                 = " + fabric_mod_name + "_sess_get_index,\n"
        buf += "        .sess_get_initiator_sid         = NULL,\n"
        buf += "        .write_pending                  = " + fabric_mod_name + "_write_pending,\n"
@@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .queue_data_in                  = " + fabric_mod_name + "_queue_data_in,\n"
        buf += "        .queue_status                   = " + fabric_mod_name + "_queue_status,\n"
        buf += "        .queue_tm_rsp                   = " + fabric_mod_name + "_queue_tm_rsp,\n"
-       buf += "        .is_state_remove                = " + fabric_mod_name + "_is_state_remove,\n"
+       buf += "        .aborted_task                   = " + fabric_mod_name + "_aborted_task,\n"
        buf += "        /*\n"
        buf += "         * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
        buf += "         */\n"
@@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        /*\n"
        buf += "         * Register the top level struct config_item_type with TCM core\n"
        buf += "         */\n"
-       buf += "        fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+       buf += "        fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
        buf += "        if (IS_ERR(fabric)) {\n"
        buf += "                printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
        buf += "                return PTR_ERR(fabric);\n"
@@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                if re.search('get_fabric_name', fo):
                        buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
                        buf += "{\n"
-                       buf += "        return \"" + fabric_mod_name[4:] + "\";\n"
+                       buf += "        return \"" + fabric_mod_name + "\";\n"
                        buf += "}\n\n"
                        bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
                        continue
@@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
 
-               if re.search('stop_session\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
-                       buf += "{\n"
-                       buf += "        return;\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
-               if re.search('fall_back_to_erl0\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
-                       buf += "{\n"
-                       buf += "        return;\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
-               if re.search('sess_logged_in\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
-                       buf += "{\n"
-                       buf += "        return 0;\n"
-                       buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
                if re.search('sess_get_index\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
                        buf += "{\n"
@@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
 
                if re.search('queue_tm_rsp\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+                       buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
-                       buf += "        return 0;\n"
+                       buf += "        return;\n"
                        buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+                       bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
 
-               if re.search('is_state_remove\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+               if re.search('aborted_task\)\(', fo):
+                       buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
-                       buf += "        return 0;\n"
+                       buf += "        return;\n"
                        buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+                       bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
 
        ret = p.write(buf)
        if ret:
@@ -1018,11 +993,11 @@ def main(modname, proto_ident):
        tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
        tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
 
-       input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+       input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
        if input == "yes" or input == "y":
                tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
 
-       input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+       input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
        if input == "yes" or input == "y":
                tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
 
index fca24c931ec8dcb737012b6b67f6b88a8fef2223..753e47cc2e2036cd53e176241f579addd3b43ec1 100644 (file)
@@ -3,7 +3,7 @@ CPU cooling APIs How To
 
 Written by Amit Daniel Kachhap <amit.kachhap@linaro.org>
 
-Updated: 12 May 2012
+Updated: 6 Jan 2015
 
 Copyright (c)  2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 
@@ -25,7 +25,18 @@ the user. The registration APIs returns the cooling device pointer.
 
    clip_cpus: cpumask of cpus where the frequency constraints will happen.
 
-1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
+       struct device_node *np, const struct cpumask *clip_cpus)
+
+    This interface function registers the cpufreq cooling device with
+    the name "thermal-cpufreq-%x" linking it with a device tree node, in
+    order to bind it via the thermal DT code. This api can support multiple
+    instances of cpufreq cooling devices.
+
+    np: pointer to the cooling device device tree node
+    clip_cpus: cpumask of cpus where the frequency constraints will happen.
+
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 
     This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 
index 788d3a1fc7232cc606cdb7fdd7c7d38a8f228b71..2b3aca7e40b93876216b4e02c8e0abc6e81131cc 100644 (file)
@@ -696,7 +696,7 @@ L:  alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://blackfin.uclinux.org/
 S:     Supported
 F:     sound/soc/blackfin/*
+
 ANALOG DEVICES INC IIO DRIVERS
 M:     Lars-Peter Clausen <lars@metafoo.de>
 M:     Michael Hennerich <Michael.Hennerich@analog.com>
@@ -708,6 +708,16 @@ X: drivers/iio/*/adjd*
 F:     drivers/staging/iio/*/ad*
 F:     staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANDROID DRIVERS
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:     Arve Hjønnevåg <arve@android.com>
+M:     Riley Andrews <riandrews@android.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git
+L:     devel@driverdev.osuosl.org
+S:     Supported
+F:     drivers/android/
+F:     drivers/staging/android/
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linuxppc-dev@lists.ozlabs.org
@@ -724,15 +734,15 @@ F:        include/uapi/linux/apm_bios.h
 F:     drivers/char/apm-emulation.c
 
 APPLE BCM5974 MULTITOUCH DRIVER
-M:     Henrik Rydberg <rydberg@euromail.se>
+M:     Henrik Rydberg <rydberg@bitmath.org>
 L:     linux-input@vger.kernel.org
-S:     Maintained
+S:     Odd fixes
 F:     drivers/input/mouse/bcm5974.c
 
 APPLE SMC DRIVER
-M:     Henrik Rydberg <rydberg@euromail.se>
+M:     Henrik Rydberg <rydberg@bitmath.org>
 L:     lm-sensors@lm-sensors.org
-S:     Maintained
+S:     Odd fixes
 F:     drivers/hwmon/applesmc.c
 
 APPLETALK NETWORK LAYER
@@ -754,13 +764,6 @@ L: linux-media@vger.kernel.org
 S:     Maintained
 F:     drivers/media/i2c/aptina-pll.*
 
-ARASAN COMPACT FLASH PATA CONTROLLER
-M:     Viresh Kumar <viresh.linux@gmail.com>
-L:     linux-ide@vger.kernel.org
-S:     Maintained
-F:     include/linux/pata_arasan_cf_data.h
-F:     drivers/ata/pata_arasan_cf.c
-
 ARC FRAMEBUFFER DRIVER
 M:     Jaya Kumar <jayalk@intworks.biz>
 S:     Maintained
@@ -2258,6 +2261,7 @@ F:        drivers/gpio/gpio-bt8xx.c
 BTRFS FILE SYSTEM
 M:     Chris Mason <clm@fb.com>
 M:     Josef Bacik <jbacik@fb.com>
+M:     David Sterba <dsterba@suse.cz>
 L:     linux-btrfs@vger.kernel.org
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2344,7 +2348,8 @@ CAN NETWORK LAYER
 M:     Oliver Hartkopp <socketcan@hartkopp.net>
 L:     linux-can@vger.kernel.org
 W:     http://gitorious.org/linux-can
-T:     git git://gitorious.org/linux-can/linux-can-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S:     Maintained
 F:     Documentation/networking/can.txt
 F:     net/can/
@@ -2359,7 +2364,8 @@ M:        Wolfgang Grandegger <wg@grandegger.com>
 M:     Marc Kleine-Budde <mkl@pengutronix.de>
 L:     linux-can@vger.kernel.org
 W:     http://gitorious.org/linux-can
-T:     git git://gitorious.org/linux-can/linux-can-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S:     Maintained
 F:     drivers/net/can/
 F:     include/linux/can/dev.h
@@ -3181,7 +3187,7 @@ L:        dmaengine@vger.kernel.org
 Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Maintained
 F:     drivers/dma/
-F:     include/linux/dma*
+F:     include/linux/dmaengine.h
 F:     Documentation/dmaengine/
 T:     git git://git.infradead.org/users/vkoul/slave-dma.git
 
@@ -4747,20 +4753,20 @@ S:      Supported
 F:     drivers/scsi/ipr.*
 
 IBM Power Virtual Ethernet Device Driver
-M:     Santiago Leon <santil@linux.vnet.ibm.com>
+M:     Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmveth.*
 
 IBM Power Virtual SCSI Device Drivers
-M:     Nathan Fontenot <nfont@linux.vnet.ibm.com>
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/ibmvscsi/ibmvscsi*
 F:     drivers/scsi/ibmvscsi/viosrp.h
 
 IBM Power Virtual FC Device Drivers
-M:     Brian King <brking@linux.vnet.ibm.com>
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/ibmvscsi/ibmvfc*
@@ -4928,7 +4934,6 @@ F:        include/uapi/linux/inotify.h
 
 INPUT (KEYBOARD, MOUSE, JOYSTICK, TOUCHSCREEN) DRIVERS
 M:     Dmitry Torokhov <dmitry.torokhov@gmail.com>
-M:     Dmitry Torokhov <dtor@mail.ru>
 L:     linux-input@vger.kernel.org
 Q:     http://patchwork.kernel.org/project/linux-input/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git
@@ -4939,10 +4944,10 @@ F:      include/uapi/linux/input.h
 F:     include/linux/input/
 
 INPUT MULTITOUCH (MT) PROTOCOL
-M:     Henrik Rydberg <rydberg@euromail.se>
+M:     Henrik Rydberg <rydberg@bitmath.org>
 L:     linux-input@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
-S:     Maintained
+S:     Odd fixes
 F:     Documentation/input/multi-touch-protocol.txt
 F:     drivers/input/input-mt.c
 K:     \b(ABS|SYN)_MT_
@@ -4950,7 +4955,6 @@ K:        \b(ABS|SYN)_MT_
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:     Intel SCU Linux support <intel-linux-scu@intel.com>
 M:     Artur Paszkiewicz <artur.paszkiewicz@intel.com>
-M:     Dave Jiang <dave.jiang@intel.com>
 L:     linux-scsi@vger.kernel.org
 T:     git git://git.code.sf.net/p/intel-sas/isci
 S:     Supported
@@ -5278,6 +5282,15 @@ W:       www.open-iscsi.org
 Q:     http://patchwork.kernel.org/project/linux-rdma/list/
 F:     drivers/infiniband/ulp/iser/
 
+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M:     Sagi Grimberg <sagig@mellanox.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L:     linux-rdma@vger.kernel.org
+L:     target-devel@vger.kernel.org
+S:     Supported
+W:     http://www.linux-iscsi.org
+F:     drivers/infiniband/ulp/isert
+
 ISDN SUBSYSTEM
 M:     Karsten Keil <isdn@linux-pingi.de>
 L:     isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -5692,6 +5705,49 @@ F:       drivers/lguest/
 F:     include/linux/lguest*.h
 F:     tools/lguest/
 
+LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
+M:     Tejun Heo <tj@kernel.org>
+L:     linux-ide@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:     Maintained
+F:     drivers/ata/
+F:     include/linux/ata.h
+F:     include/linux/libata.h
+
+LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
+M:     Viresh Kumar <viresh.linux@gmail.com>
+L:     linux-ide@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:     Maintained
+F:     include/linux/pata_arasan_cf_data.h
+F:     drivers/ata/pata_arasan_cf.c
+
+LIBATA PATA DRIVERS
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+M:     Tejun Heo <tj@kernel.org>
+L:     linux-ide@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:     Maintained
+F:     drivers/ata/pata_*.c
+F:     drivers/ata/ata_generic.c
+
+LIBATA SATA AHCI PLATFORM devices support
+M:     Hans de Goede <hdegoede@redhat.com>
+M:     Tejun Heo <tj@kernel.org>
+L:     linux-ide@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:     Maintained
+F:     drivers/ata/ahci_platform.c
+F:     drivers/ata/libahci_platform.c
+F:     include/linux/ahci_platform.h
+
+LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
+M:     Mikael Pettersson <mikpelinux@gmail.com>
+L:     linux-ide@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:     Maintained
+F:     drivers/ata/sata_promise.*
+
 LIBLOCKDEP
 M:     Sasha Levin <sasha.levin@oracle.com>
 S:     Maintained
@@ -6567,6 +6623,7 @@ F:        include/linux/netdevice.h
 F:     include/uapi/linux/in.h
 F:     include/uapi/linux/net.h
 F:     include/uapi/linux/netdevice.h
+F:     include/uapi/linux/net_namespace.h
 F:     tools/net/
 F:     tools/testing/selftests/net/
 F:     lib/random32.c
@@ -6976,14 +7033,12 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:     Grant Likely <grant.likely@linaro.org>
 M:     Rob Herring <robh+dt@kernel.org>
 L:     devicetree@vger.kernel.org
-W:     http://fdt.secretlab.ca
-T:     git git://git.secretlab.ca/git/linux-2.6.git
+W:     http://www.devicetree.org/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
 S:     Maintained
 F:     drivers/of/
 F:     include/linux/of*.h
 F:     scripts/dtc/
-K:     of_get_property
-K:     of_match_table
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:     Rob Herring <robh+dt@kernel.org>
@@ -7229,7 +7284,7 @@ S:        Maintained
 F:     drivers/pci/host/*layerscape*
 
 PCI DRIVER FOR IMX6
-M:     Richard Zhu <r65037@freescale.com>
+M:     Richard Zhu <Richard.Zhu@freescale.com>
 M:     Lucas Stach <l.stach@pengutronix.de>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -7399,6 +7454,7 @@ F:        drivers/crypto/picoxcell*
 PIN CONTROL SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
 L:     linux-gpio@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:     Maintained
 F:     drivers/pinctrl/
 F:     include/linux/pinctrl/
@@ -7566,12 +7622,6 @@ W:       http://wireless.kernel.org/en/users/Drivers/p54
 S:     Obsolete
 F:     drivers/net/wireless/prism54/
 
-PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M:     Mikael Pettersson <mikpelinux@gmail.com>
-L:     linux-ide@vger.kernel.org
-S:     Maintained
-F:     drivers/ata/sata_promise.*
-
 PS3 NETWORK SUPPORT
 M:     Geoff Levand <geoff@infradead.org>
 L:     netdev@vger.kernel.org
@@ -7737,8 +7787,7 @@ F:        Documentation/scsi/LICENSE.qla2xxx
 F:     drivers/scsi/qla2xxx/
 
 QLOGIC QLA4XXX iSCSI DRIVER
-M:     Vikas Chaudhary <vikas.chaudhary@qlogic.com>
-M:     iscsi-driver@qlogic.com
+M:     QLogic-Storage-Upstream@qlogic.com
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     Documentation/scsi/LICENSE.qla4xxx
@@ -8013,6 +8062,13 @@ S:       Maintained
 F:     Documentation/rfkill.txt
 F:     net/rfkill/
 
+RHASHTABLE
+M:     Thomas Graf <tgraf@suug.ch>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     lib/rhashtable.c
+F:     include/linux/rhashtable.h
+
 RICOH SMARTMEDIA/XD DRIVER
 M:     Maxim Levitsky <maximlevitsky@gmail.com>
 S:     Maintained
@@ -8546,25 +8602,6 @@ S:       Maintained
 F:     drivers/misc/phantom.c
 F:     include/uapi/linux/phantom.h
 
-SERIAL ATA (SATA) SUBSYSTEM
-M:     Tejun Heo <tj@kernel.org>
-L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
-S:     Supported
-F:     drivers/ata/
-F:     include/linux/ata.h
-F:     include/linux/libata.h
-
-SERIAL ATA AHCI PLATFORM devices support
-M:     Hans de Goede <hdegoede@redhat.com>
-M:     Tejun Heo <tj@kernel.org>
-L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
-S:     Supported
-F:     drivers/ata/ahci_platform.c
-F:     drivers/ata/libahci_platform.c
-F:     include/linux/ahci_platform.h
-
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
 M:     Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
 L:     linux-scsi@vger.kernel.org
@@ -9533,7 +9570,8 @@ F:        drivers/platform/x86/thinkpad_acpi.c
 TI BANDGAP AND THERMAL DRIVER
 M:     Eduardo Valentin <edubezval@gmail.com>
 L:     linux-pm@vger.kernel.org
-S:     Supported
+L:     linux-omap@vger.kernel.org
+S:     Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
 TI CLOCK DRIVER
@@ -9590,6 +9628,13 @@ F:       drivers/power/lp8788-charger.c
 F:     drivers/regulator/lp8788-*.c
 F:     include/linux/mfd/lp8788*.h
 
+TI NETCP ETHERNET DRIVER
+M:     Wingman Kwok <w-kwok2@ti.com>
+M:     Murali Karicheri <m-karicheri2@ti.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/ti/netcp*
+
 TI TWL4030 SERIES SOC CODEC DRIVER
 M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -10146,6 +10191,7 @@ USERSPACE I/O (UIO)
 M:     "Hans J. Koch" <hjk@hansjkoch.de>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:     Documentation/DocBook/uio-howto.tmpl
 F:     drivers/uio/
 F:     include/linux/uio*.h
index ef748e17702f5109bf2678fb57f7929ef411d938..c8e17c05f9163ae74dd9434e4f68b15188190558 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc7
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
@@ -391,6 +391,7 @@ USERINCLUDE    := \
 # Needed to be compatible with the O= option
 LINUXINCLUDE    := \
                -I$(srctree)/arch/$(hdr-arch)/include \
+               -Iarch/$(hdr-arch)/include/generated/uapi \
                -Iarch/$(hdr-arch)/include/generated \
                $(if $(KBUILD_SRC), -I$(srctree)/include) \
                -Iinclude \
index 076c35cd6cde7c2d782a46fe3942b8962369230e..98a1525fa164df0178fd4cb5fc0267129b2844e3 100644 (file)
@@ -285,8 +285,12 @@ pcibios_claim_one_bus(struct pci_bus *b)
                        if (r->parent || !r->start || !r->flags)
                                continue;
                        if (pci_has_flag(PCI_PROBE_ONLY) ||
-                           (r->flags & IORESOURCE_PCI_FIXED))
-                               pci_claim_resource(dev, i);
+                           (r->flags & IORESOURCE_PCI_FIXED)) {
+                               if (pci_claim_resource(dev, i) == 0)
+                                       continue;
+
+                               pci_claim_bridge_resource(dev, i);
+                       }
                }
        }
 
index 98838a05ba6d89f0459742131010f57c38cbed05..9d0ac091a52a7d16cf1f78f402ab48c511924a24 100644 (file)
@@ -156,6 +156,8 @@ retry:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 6f7e3a68803a097461d27ace9fa940ba3211ab6b..563cb27e37f55f3f99badc9b99e64aca6ee397b7 100644 (file)
@@ -161,6 +161,8 @@ good_area:
 
        if (fault & VM_FAULT_OOM)
                goto out_of_memory;
+       else if (fault & VM_FAULT_SIGSEGV)
+               goto bad_area;
        else if (fault & VM_FAULT_SIGBUS)
                goto do_sigbus;
 
index 68be9017593df10f235ca228a6837aaa11f5c818..132c70e2d2f11cfb4e655ddcb05931be5bd03498 100644 (file)
@@ -263,16 +263,37 @@ restart:  adr     r0, LC0
                 * OK... Let's do some funky business here.
                 * If we do have a DTB appended to zImage, and we do have
                 * an ATAG list around, we want the later to be translated
-                * and folded into the former here.  To be on the safe side,
-                * let's temporarily move  the stack away into the malloc
-                * area.  No GOT fixup has occurred yet, but none of the
-                * code we're about to call uses any global variable.
+                * and folded into the former here. No GOT fixup has occurred
+                * yet, but none of the code we're about to call uses any
+                * global variable.
                */
-               add     sp, sp, #0x10000
+
+               /* Get the initial DTB size */
+               ldr     r5, [r6, #4]
+#ifndef __ARMEB__
+               /* convert to little endian */
+               eor     r1, r5, r5, ror #16
+               bic     r1, r1, #0x00ff0000
+               mov     r5, r5, ror #8
+               eor     r5, r5, r1, lsr #8
+#endif
+               /* 50% DTB growth should be good enough */
+               add     r5, r5, r5, lsr #1
+               /* preserve 64-bit alignment */
+               add     r5, r5, #7
+               bic     r5, r5, #7
+               /* clamp to 32KB min and 1MB max */
+               cmp     r5, #(1 << 15)
+               movlo   r5, #(1 << 15)
+               cmp     r5, #(1 << 20)
+               movhi   r5, #(1 << 20)
+               /* temporarily relocate the stack past the DTB work space */
+               add     sp, sp, r5
+
                stmfd   sp!, {r0-r3, ip, lr}
                mov     r0, r8
                mov     r1, r6
-               sub     r2, sp, r6
+               mov     r2, r5
                bl      atags_to_fdt
 
                /*
@@ -285,11 +306,11 @@ restart:  adr     r0, LC0
                bic     r0, r0, #1
                add     r0, r0, #0x100
                mov     r1, r6
-               sub     r2, sp, r6
+               mov     r2, r5
                bleq    atags_to_fdt
 
                ldmfd   sp!, {r0-r3, ip, lr}
-               sub     sp, sp, #0x10000
+               sub     sp, sp, r5
 #endif
 
                mov     r8, r6                  @ use the appended device tree
@@ -306,7 +327,7 @@ restart:    adr     r0, LC0
                subs    r1, r5, r1
                addhi   r9, r9, r1
 
-               /* Get the dtb's size */
+               /* Get the current DTB size */
                ldr     r5, [r6, #4]
 #ifndef __ARMEB__
                /* convert r5 (dtb size) to little endian */
index 5a452fdd7c5d9711cec9f6d0196f90b0726fefc3..c90724bded1081b7ea6e22461451e39d48828cec 100644 (file)
@@ -31,6 +31,7 @@
                        status = "disabled";
                        reg = <0x5c000000 0x30000>;
                        interrupts = <67 68 69 70>;
+                       syscon = <&omap3_scm_general>;
                        ti,davinci-ctrl-reg-offset = <0x10000>;
                        ti,davinci-ctrl-mod-reg-offset = <0>;
                        ti,davinci-ctrl-ram-offset = <0x20000>;
index 1466580be2954996c43fc189bc263967b37a49fc..70b1943a86b104502449c1c58153d9ed68c7c469 100644 (file)
                compatible = "linux,spdif-dir";
        };
 };
-
-&pinctrl {
-       /*
-        * These pins might be muxed as I2S by
-        * the bootloader, but it conflicts
-        * with the real I2S pins that are
-        * muxed using i2s_pins. We must mux
-        * those pins to a function other than
-        * I2S.
-        */
-       pinctrl-0 = <&hog_pins1 &hog_pins2>;
-       pinctrl-names = "default";
-
-       hog_pins1: hog-pins1 {
-               marvell,pins = "mpp6",  "mpp8", "mpp10",
-                              "mpp12", "mpp13";
-               marvell,function = "gpio";
-       };
-
-       hog_pins2: hog-pins2 {
-               marvell,pins = "mpp5", "mpp7", "mpp9";
-               marvell,function = "gpo";
-       };
-};
index 1467750e3377d161bddff0cfcce1f35d91c9261b..e8c6c600a5b69335bbea0fdabf70e7ccee7ff163 100644 (file)
                        interrupts = <26 IRQ_TYPE_LEVEL_HIGH 3>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_fb>;
+                       clocks = <&lcd_clk>, <&lcd_clk>;
+                       clock-names = "lcdc_clk", "hclk";
                        status = "disabled";
                };
 
index 28e7e2060c3399c204f547b37fc325fa1069e8d8..a98ac1bd8f65124fe69d43aa8e8b467a2a7c911c 100644 (file)
@@ -65,6 +65,8 @@
 };
 
 &sdhci2 {
+       broken-cd;
+       bus-width = <8>;
        non-removable;
        status = "okay";
 };
index 35253c947a7cd0002211dac773d7f1f9723d6fce..e2f61f27944e24fd45cc65518126f932409ea610 100644 (file)
@@ -83,7 +83,8 @@
                        compatible = "mrvl,pxav3-mmc";
                        reg = <0xab1000 0x200>;
                        interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&chip CLKID_SDIO1XIN>;
+                       clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>;
+                       clock-names = "io", "core";
                        status = "disabled";
                };
 
                                interrupt-parent = <&gic>;
                                interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
                        };
-
-                       gpio4: gpio@5000 {
-                               compatible = "snps,dw-apb-gpio";
-                               reg = <0x5000 0x400>;
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-
-                               porte: gpio-port@4 {
-                                       compatible = "snps,dw-apb-gpio-port";
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       snps,nr-gpios = <32>;
-                                       reg = <0>;
-                               };
-                       };
-
-                       gpio5: gpio@c000 {
-                               compatible = "snps,dw-apb-gpio";
-                               reg = <0xc000 0x400>;
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-
-                               portf: gpio-port@5 {
-                                       compatible = "snps,dw-apb-gpio-port";
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       snps,nr-gpios = <32>;
-                                       reg = <0>;
-                               };
-                       };
                };
 
                chip: chip-control@ea0000 {
                        ranges = <0 0xfc0000 0x10000>;
                        interrupt-parent = <&sic>;
 
+                       sm_gpio1: gpio@5000 {
+                               compatible = "snps,dw-apb-gpio";
+                               reg = <0x5000 0x400>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               portf: gpio-port@5 {
+                                       compatible = "snps,dw-apb-gpio-port";
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       snps,nr-gpios = <32>;
+                                       reg = <0>;
+                               };
+                       };
+
                        i2c2: i2c@7000 {
                                compatible = "snps,designware-i2c";
                                #address-cells = <1>;
                                status = "disabled";
                        };
 
+                       sm_gpio0: gpio@c000 {
+                               compatible = "snps,dw-apb-gpio";
+                               reg = <0xc000 0x400>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               porte: gpio-port@4 {
+                                       compatible = "snps,dw-apb-gpio-port";
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       snps,nr-gpios = <32>;
+                                       reg = <0>;
+                               };
+                       };
+
                        sysctrl: pin-controller@d000 {
                                compatible = "marvell,berlin2q-system-ctrl";
                                reg = <0xd000 0x100>;
index 10b725c7bfc02fc79e8c694150ec1e23c5493175..ad4118f7e1a6106139af2a6bfe56d3534c306af9 100644 (file)
                };
                partition@5 {
                        label = "QSPI.u-boot-spl-os";
-                       reg = <0x00140000 0x00010000>;
+                       reg = <0x00140000 0x00080000>;
                };
                partition@6 {
                        label = "QSPI.u-boot-env";
-                       reg = <0x00150000 0x00010000>;
+                       reg = <0x001c0000 0x00010000>;
                };
                partition@7 {
                        label = "QSPI.u-boot-env.backup1";
-                       reg = <0x00160000 0x0010000>;
+                       reg = <0x001d0000 0x0010000>;
                };
                partition@8 {
                        label = "QSPI.kernel";
-                       reg = <0x00170000 0x0800000>;
+                       reg = <0x001e0000 0x0800000>;
                };
                partition@9 {
                        label = "QSPI.file-system";
-                       reg = <0x00970000 0x01690000>;
+                       reg = <0x009e0000 0x01620000>;
                };
        };
 };
index 22771bc1643afcd7652773f058e6ace90df5d2a9..63f8b007bdc51358d53cdd57b58c8fe1a21f617a 100644 (file)
                                tx-fifo-resize;
                                maximum-speed = "super-speed";
                                dr_mode = "otg";
+                               snps,dis_u3_susphy_quirk;
+                               snps,dis_u2_susphy_quirk;
                        };
                };
 
                                tx-fifo-resize;
                                maximum-speed = "high-speed";
                                dr_mode = "otg";
+                               snps,dis_u3_susphy_quirk;
+                               snps,dis_u2_susphy_quirk;
                        };
                };
 
                                tx-fifo-resize;
                                maximum-speed = "high-speed";
                                dr_mode = "otg";
+                               snps,dis_u3_susphy_quirk;
+                               snps,dis_u2_susphy_quirk;
                        };
                };
 
index 0a229fcd7acfdfff4e07b4359cf0c7776e510efc..d75c89d7666a0a0bea5fff5139439f8090eee793 100644 (file)
 
        dp_phy: video-phy@10040720 {
                compatible = "samsung,exynos5250-dp-video-phy";
-               reg = <0x10040720 4>;
+               samsung,pmu-syscon = <&pmu_system_controller>;
                #phy-cells = <0>;
        };
 
index aa7a7d727a7e80033df0cddb0b9cce8596d48ddc..db2c1c4cd90076b5c7bb47d12737e46bf4938264 100644 (file)
 &usbdrd_dwc3_1 {
        dr_mode = "host";
 };
+
+&cci {
+       status = "disabled";
+};
index 517e50f6760b0cf4d77bc55bf899f0aa5e3667a1..6d38f8bfd0e68e71358608a9af0ec32dbc5a53c2 100644 (file)
                };
        };
 
-       cci@10d20000 {
+       cci: cci@10d20000 {
                compatible = "arm,cci-400";
                #address-cells = <1>;
                #size-cells = <1>;
        };
 
        dp_phy: video-phy@10040728 {
-               compatible = "samsung,exynos5250-dp-video-phy";
-               reg = <0x10040728 4>;
+               compatible = "samsung,exynos5420-dp-video-phy";
+               samsung,pmu-syscon = <&pmu_system_controller>;
                #phy-cells = <0>;
        };
 
index 58d3c3cf2923f5ffae5e1657140fc7b94f4090c1..e4d3aecc4ed2c0fd61b1c68a93f20a24b808bd8c 100644 (file)
                                #size-cells = <0>;
                                compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
                                reg = <0x43fa4000 0x4000>;
-                               clocks = <&clks 62>, <&clks 62>;
+                               clocks = <&clks 78>, <&clks 78>;
                                clock-names = "ipg", "per";
                                interrupts = <14>;
                                status = "disabled";
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                #pwm-cells = <2>;
                                reg = <0x53fa0000 0x4000>;
-                               clocks = <&clks 106>, <&clks 36>;
+                               clocks = <&clks 106>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <36>;
                        };
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                #pwm-cells = <2>;
                                reg = <0x53fa8000 0x4000>;
-                               clocks = <&clks 107>, <&clks 36>;
+                               clocks = <&clks 107>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <41>;
                        };
                        pwm4: pwm@53fc8000 {
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                reg = <0x53fc8000 0x4000>;
-                               clocks = <&clks 108>, <&clks 36>;
+                               clocks = <&clks 108>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <42>;
                        };
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                #pwm-cells = <2>;
                                reg = <0x53fe0000 0x4000>;
-                               clocks = <&clks 105>, <&clks 36>;
+                               clocks = <&clks 105>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <26>;
                        };
index 56569cecaa7852795ab94f6046321f13bddd837e..649befeb2cf96ef4b968f809a98d5d718227b002 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
 
-               reg_usbh1_vbus: regulator@0 {
-                       compatible = "regulator-fixed";
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_usbh1reg>;
-                       reg = <0>;
-                       regulator-name = "usbh1_vbus";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
-                       enable-active-high;
-               };
-
-               reg_usbotg_vbus: regulator@1 {
+               reg_hub_reset: regulator@0 {
                        compatible = "regulator-fixed";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_usbotgreg>;
-                       reg = <1>;
-                       regulator-name = "usbotg_vbus";
+                       reg = <0>;
+                       regulator-name = "hub_reset";
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
                        reg = <0>;
                        clocks = <&clks IMX5_CLK_DUMMY>;
                        clock-names = "main_clk";
+                       reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
                };
        };
 };
 &usbh1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usbh1>;
-       vbus-supply = <&reg_usbh1_vbus>;
+       vbus-supply = <&reg_hub_reset>;
        fsl,usbphy = <&usbh1phy>;
        phy_type = "ulpi";
        status = "okay";
        dr_mode = "otg";
        disable-over-current;
        phy_type = "utmi_wide";
-       vbus-supply = <&reg_usbotg_vbus>;
        status = "okay";
 };
 
index 327d362fe275519ad7136279f319ed700b336938..009abd69385d854c15c4b35bc98aca87a1ef8d84 100644 (file)
@@ -67,7 +67,6 @@
        phy-mode = "rgmii";
        interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
                              <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
-       fsl,magic-packet;
        status = "okay";
 };
 
index 6bfd0bc6e65870105a90a767a3269d7061d08411..f1cd2147421d2e0f82e2a921cc5c449d8ff6d1b4 100644 (file)
        pinctrl-0 = <&pinctrl_enet>;
        phy-mode = "rgmii";
        phy-reset-gpios = <&gpio1 25 0>;
-       fsl,magic-packet;
        status = "okay";
 };
 
index 4fc03b7f1ceec52fe5d327974cd2929127de21f9..2109d0763c1b6dca448449ba74709c6ced666cbb 100644 (file)
                        vpu: vpu@02040000 {
                                compatible = "cnm,coda960";
                                reg = <0x02040000 0x3c000>;
-                               interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                                            <0 12 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>,
+                                            <0 3 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-names = "bit", "jpeg";
                                clocks = <&clks IMX6QDL_CLK_VPU_AXI>,
                                         <&clks IMX6QDL_CLK_MMDC_CH0_AXI>,
index 1e6e5cc1c14cf283fb8b3bd9321f44218fbe4fb3..c108bb451337ee4c5108847192d2991d78a4c68b 100644 (file)
        pinctrl-0 = <&pinctrl_enet1>;
        phy-supply = <&reg_enet_3v3>;
        phy-mode = "rgmii";
+       phy-handle = <&ethphy1>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ethphy1: ethernet-phy@1 {
+                       reg = <1>;
+               };
+
+               ethphy2: ethernet-phy@2 {
+                       reg = <2>;
+               };
+       };
 };
 
 &fec2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet2>;
        phy-mode = "rgmii";
+       phy-handle = <&ethphy2>;
        status = "okay";
 };
 
index 657da14cb4b5b2cc96bc1b4139c871f5fac65307..c70bb27ac65a63e1f197408e4a9ccb822d3407c4 100644 (file)
                scfg: scfg@1570000 {
                        compatible = "fsl,ls1021a-scfg", "syscon";
                        reg = <0x0 0x1570000 0x0 0x10000>;
+                       big-endian;
                };
 
                clockgen: clocking@1ee1000 {
index 53f3ca064140470866dbfe12773af1ddb76d1632..b550c41b46f1ecc83fcc9abf551fbeff4f44e2a0 100644 (file)
                };
        };
 
+       /* Ethernet is on some early development boards and qemu */
        ethernet@gpmc {
                compatible = "smsc,lan91c94";
-
-               status = "disabled";
-
                interrupt-parent = <&gpio2>;
                interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;  /* gpio54 */
                reg = <1 0x300 0xf>;            /* 16 byte IO range at offset 0x300 */
index 831a7aa85136268ac7802a8242e67bfc81829a45..e1d3eeb8f094139ddc7ef67bea911cfd69a76caa 100644 (file)
 };
 
 &gmac {
-       phy_regulator = "vcc_phy";
+       phy-supply = <&vcc_phy>;
        phy-mode = "rgmii";
        clock_in_out = "input";
        snps,reset-gpio = <&gpio4 7 0>;
index 048cb170c884ee16f924cfb2abbff11534126c40..1c08eb0ecdb9c345d0cf9369964173f080e4278d 100644 (file)
@@ -98,6 +98,8 @@
                pinctrl-names = "default";
                pinctrl-0 = <&eth_phy_pwr>;
                regulator-name = "vcc_phy";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
                regulator-always-on;
                regulator-boot-on;
        };
 };
 
 &pinctrl {
+       pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+               drive-strength = <8>;
+       };
+
+       pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+               bias-pull-up;
+               drive-strength = <8>;
+       };
+
        backlight {
                bl_en: bl-en {
                        rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
                };
        };
 
+       sdmmc {
+               /*
+                * Default drive strength isn't enough to achieve even
+                * high-speed mode on EVB board so bump up to 8ma.
+                */
+               sdmmc_bus4: sdmmc-bus4 {
+                       rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+                                       <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+                                       <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+                                       <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+               };
+
+               sdmmc_clk: sdmmc-clk {
+                       rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+               };
+
+               sdmmc_cmd: sdmmc-cmd {
+                       rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+               };
+       };
+
        usb {
                host_vbus_drv: host-vbus-drv {
                        rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
index 49c10d33df302b7d967f0861c1424f636026a943..77e03655aca3626ad369b81b572b755987fd1609 100644 (file)
                        "Headphone Jack", "HPOUTR",
                        "IN2L", "Line In Jack",
                        "IN2R", "Line In Jack",
-                       "MICBIAS", "IN1L",
+                       "Mic", "MICBIAS",
                        "IN1L", "Mic";
 
                atmel,ssc-controller = <&ssc0>;
index 1b0f30c2c4a58d907aafd9d54e80d11e80d5722c..b94995d1889fc3ca780d69ac79a9c3be53620189 100644 (file)
 
                        pit: timer@fc068630 {
                                compatible = "atmel,at91sam9260-pit";
-                               reg = <0xfc068630 0xf>;
+                               reg = <0xfc068630 0x10>;
                                interrupts = <3 IRQ_TYPE_LEVEL_HIGH 5>;
                                clocks = <&h32ck>;
                        };
index a8c00ee7522a1872ee5af06debe66c6acdba49f8..3d0b8755caeee62f77ac214d343ab40cebba2ce0 100644 (file)
                stmpe2401_1 {
                        stmpe2401_1_nhk_mode: stmpe2401_1_nhk {
                                nhk_cfg1 {
-                                       ste,pins = "GPIO76_B20"; // IRQ line
+                                       pins = "GPIO76_B20"; // IRQ line
                                        ste,input = <0>;
                                };
                                nhk_cfg2 {
-                                       ste,pins = "GPIO77_B8"; // reset line
+                                       pins = "GPIO77_B8"; // reset line
                                        ste,output = <1>;
                                };
                        };
                stmpe2401_2 {
                        stmpe2401_2_nhk_mode: stmpe2401_2_nhk {
                                nhk_cfg1 {
-                                       ste,pins = "GPIO78_A8"; // IRQ line
+                                       pins = "GPIO78_A8"; // IRQ line
                                        ste,input = <0>;
                                };
                                nhk_cfg2 {
-                                       ste,pins = "GPIO79_C9"; // reset line
+                                       pins = "GPIO79_C9"; // reset line
                                        ste,output = <1>;
                                };
                        };
index 3e31d32133b858d399b2cbca4eb6f01c97eaafd8..d4a8f843cdc8a320e0f75be1c58f1c377676c327 100644 (file)
 
                        status = "disabled";
                };
+
+               usb2_picophy0: phy1 {
+                       compatible = "st,stih407-usb2-phy";
+                       #phy-cells = <0>;
+                       st,syscfg = <&syscfg_core 0x100 0xf4>;
+                       resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+                                <&picophyreset STIH407_PICOPHY0_RESET>;
+                       reset-names = "global", "port";
+               };
        };
 };
index c05627eb717d7040dda4fbd3948d9731965a624c..37995f4739d2d4ac51cf9ccba764b186451a9e27 100644 (file)
 #include "stih407-family.dtsi"
 #include "stih410-pinctrl.dtsi"
 / {
+       soc {
+               usb2_picophy1: phy2 {
+                       compatible = "st,stih407-usb2-phy";
+                       #phy-cells = <0>;
+                       st,syscfg = <&syscfg_core 0xf8 0xf4>;
+                       resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+                                <&picophyreset STIH407_PICOPHY0_RESET>;
+                       reset-names = "global", "port";
+               };
 
+               usb2_picophy2: phy3 {
+                       compatible = "st,stih407-usb2-phy";
+                       #phy-cells = <0>;
+                       st,syscfg = <&syscfg_core 0xfc 0xf4>;
+                       resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
+                                <&picophyreset STIH407_PICOPHY1_RESET>;
+                       reset-names = "global", "port";
+               };
+
+               ohci0: usb@9a03c00 {
+                       compatible = "st,st-ohci-300x";
+                       reg = <0x9a03c00 0x100>;
+                       interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+                                <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+                       reset-names = "power", "softreset";
+                       phys = <&usb2_picophy1>;
+                       phy-names = "usb";
+               };
+
+               ehci0: usb@9a03e00 {
+                       compatible = "st,st-ehci-300x";
+                       reg = <0x9a03e00 0x100>;
+                       interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_usb0>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+                                <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+                       reset-names = "power", "softreset";
+                       phys = <&usb2_picophy1>;
+                       phy-names = "usb";
+               };
+
+               ohci1: usb@9a83c00 {
+                       compatible = "st,st-ohci-300x";
+                       reg = <0x9a83c00 0x100>;
+                       interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+                                <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+                       reset-names = "power", "softreset";
+                       phys = <&usb2_picophy2>;
+                       phy-names = "usb";
+               };
+
+               ehci1: usb@9a83e00 {
+                       compatible = "st,st-ehci-300x";
+                       reg = <0x9a83e00 0x100>;
+                       interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_usb1>;
+                       clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+                       resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+                                <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+                       reset-names = "power", "softreset";
+                       phys = <&usb2_picophy2>;
+                       phy-names = "usb";
+               };
+       };
 };
index 9198c12765ea0eb49de1a0b53545b6819b4b48bc..19b019b5f30e6aab57d5a793c06a2fdc834b43d3 100644 (file)
                        compatible      = "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
                        status          = "disabled";
 
-                       reg             = <0xfe810000 0x8000>, <0x148 0x4>;
-                       reg-names       = "stmmaceth", "sti-ethconf";
+                       reg             = <0xfe810000 0x8000>;
+                       reg-names       = "stmmaceth";
 
                        interrupts      = <0 147 0>, <0 148 0>, <0 149 0>;
                        interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
                        snps,mixed-burst;
                        snps,force_sf_dma_mode;
 
-                       st,syscon       = <&syscfg_rear>;
+                       st,syscon       = <&syscfg_rear 0x148>;
 
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii0>;
                        device_type = "network";
                        compatible      = "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
                        status          = "disabled";
-                       reg             = <0xfef08000 0x8000>, <0x74 0x4>;
-                       reg-names       = "stmmaceth", "sti-ethconf";
+                       reg             = <0xfef08000 0x8000>;
+                       reg-names       = "stmmaceth";
                        interrupts      = <0 150 0>, <0 151 0>, <0 152 0>;
                        interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 
                        snps,mixed-burst;
                        snps,force_sf_dma_mode;
 
-                       st,syscon               = <&syscfg_sbc>;
+                       st,syscon               = <&syscfg_sbc 0x74>;
 
                        resets                  = <&softreset STIH415_ETH1_SOFTRESET>;
                        reset-names             = "stmmaceth";
index fad9073ddeed1a3e4193f30140bb8c30773fd955..ea28ebadab1a18ed933288a6fb340de2afe63293 100644 (file)
                        device_type     = "network";
                        compatible      = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
                        status          = "disabled";
-                       reg             = <0xfe810000 0x8000>, <0x8bc 0x4>;
-                       reg-names       = "stmmaceth", "sti-ethconf";
+                       reg             = <0xfe810000 0x8000>;
+                       reg-names       = "stmmaceth";
 
                        interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
                        interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
                        snps,pbl        = <32>;
                        snps,mixed-burst;
 
-                       st,syscon               = <&syscfg_rear>;
+                       st,syscon               = <&syscfg_rear 0x8bc>;
                        resets                  = <&softreset STIH416_ETH0_SOFTRESET>;
                        reset-names             = "stmmaceth";
                        pinctrl-names   = "default";
                        device_type = "network";
                        compatible              = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
                        status          = "disabled";
-                       reg             = <0xfef08000 0x8000>, <0x7f0 0x4>;
-                       reg-names       = "stmmaceth", "sti-ethconf";
+                       reg             = <0xfef08000 0x8000>;
+                       reg-names       = "stmmaceth";
                        interrupts = <0 136 0>, <0 137 0>, <0 138 0>;
                        interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 
                        snps,pbl        = <32>;
                        snps,mixed-burst;
 
-                       st,syscon       = <&syscfg_sbc>;
+                       st,syscon       = <&syscfg_sbc 0x7f0>;
 
                        resets          = <&softreset STIH416_ETH1_SOFTRESET>;
                        reset-names     = "stmmaceth";
 
                miphy365x_phy: phy@fe382000 {
                        compatible      = "st,miphy365x-phy";
-                       st,syscfg       = <&syscfg_rear>;
+                       st,syscfg       = <&syscfg_rear 0x824 0x828>;
                        #address-cells  = <1>;
                        #size-cells     = <1>;
                        ranges;
 
                        phy_port0: port@fe382000 {
                                #phy-cells = <1>;
-                               reg = <0xfe382000 0x100>, <0xfe394000 0x100>, <0x824 0x4>;
-                               reg-names = "sata", "pcie", "syscfg";
+                               reg = <0xfe382000 0x100>, <0xfe394000 0x100>;
+                               reg-names = "sata", "pcie";
                        };
 
                        phy_port1: port@fe38a000 {
                                #phy-cells = <1>;
-                               reg = <0xfe38a000 0x100>, <0xfe804000 0x100>, <0x828 0x4>;
-                               reg-names = "sata", "pcie", "syscfg";
+                               reg = <0xfe38a000 0x100>, <0xfe804000 0x100>;
+                               reg-names = "sata", "pcie";
                        };
                };
 
index 7b4099fcf81788714def505ff009e2b7a4948db2..d5c4669224b1734178f1a04efd42e9d5ec9cda27 100644 (file)
 
        aliases {
                ethernet0 = &emac;
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &uart5;
-               serial6 = &uart6;
-               serial7 = &uart7;
        };
 
        chosen {
                                 <&ahb_gates 44>;
                        status = "disabled";
                };
+
+               framebuffer@1 {
+                       compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+                       allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+                                <&ahb_gates 44>, <&ahb_gates 46>;
+                       status = "disabled";
+               };
        };
 
        cpus {
                        reg-names = "phy_ctrl", "pmu1", "pmu2";
                        clocks = <&usb_clk 8>;
                        clock-names = "usb_phy";
-                       resets = <&usb_clk 1>, <&usb_clk 2>;
-                       reset-names = "usb1_reset", "usb2_reset";
+                       resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+                       reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
                        status = "disabled";
                };
 
index fe3c559ca6a8e6d24412abe6d2a81c2a6b97bdda..bfa742817690d823d2044d3e6397f2237abb8cd9 100644 (file)
        model = "Olimex A10s-Olinuxino Micro";
        compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart2;
+               serial2 = &uart3;
+       };
+
        soc@01c00000 {
                emac: ethernet@01c0b000 {
                        pinctrl-names = "default";
index 1b76667f3182694ffb7d055cb8f8e7230809a0bf..2e7d8263799d77fe864a4776285844deb6a21fa3 100644 (file)
 
        aliases {
                ethernet0 = &emac;
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
        };
 
        chosen {
                        reg-names = "phy_ctrl", "pmu1";
                        clocks = <&usb_clk 8>;
                        clock-names = "usb_phy";
-                       resets = <&usb_clk 1>;
-                       reset-names = "usb1_reset";
+                       resets = <&usb_clk 0>, <&usb_clk 1>;
+                       reset-names = "usb0_reset", "usb1_reset";
                        status = "disabled";
                };
 
index eeed1f236ee8c400465cbe87824516462dc6ec63..c7be3abd9fcc31a9ab3280b0a7b4a01ba73880b4 100644 (file)
        model = "HSG H702";
        compatible = "hsg,h702", "allwinner,sun5i-a13";
 
+       aliases {
+               serial0 = &uart1;
+       };
+
        soc@01c00000 {
                mmc0: mmc@01c0f000 {
                        pinctrl-names = "default";
index 916ee8bb826f7186380ecaf967d55488f84db6aa..3decefb3c37ac438c27574e7aed940fa2a684ea8 100644 (file)
        model = "Olimex A13-Olinuxino Micro";
        compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
 
+       aliases {
+               serial0 = &uart1;
+       };
+
        soc@01c00000 {
                mmc0: mmc@01c0f000 {
                        pinctrl-names = "default";
index e31d291d14cbcd22add60622a9a2e83b8206ea11..b421f7fa197b475f7e0e3e23637c1acf6a7624ec 100644 (file)
        model = "Olimex A13-Olinuxino";
        compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
 
+       aliases {
+               serial0 = &uart1;
+       };
+
        soc@01c00000 {
                mmc0: mmc@01c0f000 {
                        pinctrl-names = "default";
index c35217ea1f6473653b4d67ce953f3c761fa043a0..c556688f8b8ba400a7bef986690e40a633f836e2 100644 (file)
 / {
        interrupt-parent = <&intc>;
 
-       aliases {
-               serial0 = &uart1;
-               serial1 = &uart3;
-       };
-
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
                        reg-names = "phy_ctrl", "pmu1";
                        clocks = <&usb_clk 8>;
                        clock-names = "usb_phy";
-                       resets = <&usb_clk 1>;
-                       reset-names = "usb1_reset";
+                       resets = <&usb_clk 0>, <&usb_clk 1>;
+                       reset-names = "usb0_reset", "usb1_reset";
                        status = "disabled";
                };
 
index f47156b6572bbaf09872686b70f09268200c3cc9..1e7e7bcf83071f005b2fd57c5fc8112432104f35 100644 (file)
        interrupt-parent = <&gic>;
 
        aliases {
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &uart5;
                ethernet0 = &gmac;
        };
 
index 1cf1214cc068f2e197335cfbfc10d1bf9054636d..bd7b15add6972d26fbcffdfa010fe3435141c0b5 100644 (file)
        model = "LeMaker Banana Pi";
        compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart3;
+               serial2 = &uart7;
+       };
+
        soc@01c00000 {
                spi0: spi@01c05000 {
                        pinctrl-names = "default";
index 0e4bfa3b2b8540b361b07f9cf1c943c4cba8a9c6..0bcefcbbb756e0b7d7e8ad7450413e0c01c2a273 100644 (file)
        model = "Merrii A20 Hummingbird";
        compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart2;
+               serial2 = &uart3;
+               serial3 = &uart4;
+               serial4 = &uart5;
+       };
+
        soc@01c00000 {
                mmc0: mmc@01c0f000 {
                        pinctrl-names = "default";
index 9d669cdf031d1aa1ea78c2d8a7c23b713dff3f98..66cc7770719867d8331b30b296611a1921bfac9b 100644 (file)
@@ -20,6 +20,9 @@
        compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
 
        aliases {
+               serial0 = &uart0;
+               serial1 = &uart6;
+               serial2 = &uart7;
                spi0 = &spi1;
                spi1 = &spi2;
        };
index e21ce5992d565c348ae3b798ad232cd8f6c16c6b..89749ce34a844ef8777649a712232631534467d1 100644 (file)
 
        aliases {
                ethernet0 = &gmac;
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &uart5;
-               serial6 = &uart6;
-               serial7 = &uart7;
        };
 
        chosen {
index 7f2117ce6985b1de46cfd2fbfbc42fb76e19a1d7..32ad80804dbbc3d31adb703b373c62e9cf597beb 100644 (file)
        model = "Ippo Q8H Dual Core Tablet (v5)";
        compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
 
+       aliases {
+               serial0 = &r_uart;
+       };
+
        chosen {
                bootargs = "earlyprintk console=ttyS0,115200";
        };
index 0746cd1024d7a73b32bcbf6002954a4859d77ee5..86584fcf5e323bc43489c79be74e43ef8931d3a2 100644 (file)
 / {
        interrupt-parent = <&gic>;
 
-       aliases {
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &r_uart;
-       };
-
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
index 506948f582eee37a11fdb17ea5a7f5eb7277b390..11ec71072e815ac5b4e54a3db3cace2ee81b78b8 100644 (file)
        model = "Merrii A80 Optimus Board";
        compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart4;
+       };
+
        chosen {
                bootargs = "earlyprintk console=ttyS0,115200";
        };
index 494714f67b57821816f1b484659765aca58b8eb0..9ef4438206a9986ed82b733ebeaf961f474b742a 100644 (file)
 / {
        interrupt-parent = <&gic>;
 
-       aliases {
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &uart5;
-               serial6 = &r_uart;
-       };
-
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
index ea282c7c0ca5645394a28e313fbad1deac339882..e2fed27122497b6f330904f43de95739b6cbe6cb 100644 (file)
                clock-frequency = <400000>;
 
                magnetometer@c {
-                       compatible = "ak,ak8975";
+                       compatible = "asahi-kasei,ak8975";
                        reg = <0xc>;
                        interrupt-parent = <&gpio>;
                        interrupts = <TEGRA_GPIO(N, 5) IRQ_TYPE_LEVEL_HIGH>;
index a0f762159cb26501b517a1af0d529da8abc42283..f2b64b1b00fa5231fc2493164fcc89f434687cd1 100644 (file)
 
 &fec0 {
        phy-mode = "rmii";
+       phy-handle = <&ethphy0>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_fec0>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ethphy0: ethernet-phy@0 {
+                       reg = <0>;
+               };
+
+               ethphy1: ethernet-phy@1 {
+                       reg = <1>;
+               };
+       };
 };
 
 &fec1 {
        phy-mode = "rmii";
+       phy-handle = <&ethphy1>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_fec1>;
        status = "okay";
index 5ef14de00a29ba2f433fc93a47d95912569817c8..3d0c5d65c741933fad5947aa028dfdf11f7eda4d 100644 (file)
@@ -84,7 +84,8 @@ CONFIG_DEBUG_GPIO=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_BATTERY_SBS=y
 CONFIG_CHARGER_TPS65090=y
-# CONFIG_HWMON is not set
+CONFIG_HWMON=y
+CONFIG_SENSORS_LM90=y
 CONFIG_THERMAL=y
 CONFIG_EXYNOS_THERMAL=y
 CONFIG_EXYNOS_THERMAL_CORE=y
@@ -109,11 +110,26 @@ CONFIG_REGULATOR_S2MPA01=y
 CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS65090=y
+CONFIG_DRM=y
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PTN3460=y
+CONFIG_DRM_PS8622=y
+CONFIG_DRM_EXYNOS=y
+CONFIG_DRM_EXYNOS_FIMD=y
+CONFIG_DRM_EXYNOS_DP=y
+CONFIG_DRM_PANEL=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_SIMPLE=y
 CONFIG_EXYNOS_VIDEO=y
 CONFIG_EXYNOS_MIPI_DSI=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+CONFIG_BACKLIGHT_PWM=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FONTS=y
 CONFIG_FONT_7x14=y
index 2328fe752e9c5ed6066534a0332724b3c4f8d8e5..444685c44055be79519ccaea01ed73caad839bd9 100644 (file)
@@ -338,6 +338,7 @@ CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
 CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
@@ -455,6 +456,7 @@ CONFIG_OMAP_USB2=y
 CONFIG_TI_PIPE3=y
 CONFIG_PHY_MIPHY365X=y
 CONFIG_PHY_STIH41X_USB=y
+CONFIG_PHY_STIH407_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
index c2c3a852af9fcb28a4fc03bf69322fad6cf52f79..667d9d52aa01aaa230bd2e12a9b4575778285f38 100644 (file)
@@ -68,7 +68,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
 # CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
 CONFIG_CPU_IDLE=y
 CONFIG_BINFMT_MISC=y
index 66ce17655bb9e29a73f58ebd9a4716735fcf9739..7b0152321b20baa3178d6f9135f12cb62a27987f 100644 (file)
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
        vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+       vcpu->arch.hcr = hcr;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
        return 1;
index 254e0650e48bbc1d07a81d091c8e33570f983851..04b4ea0b550a111811876369bce25358f4d1965e 100644 (file)
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
         * Anything that is not used directly from assembly code goes
         * here.
         */
-       /* dcache set/way operation pending */
-       int last_pcpu;
-       cpumask_t require_dcache_flush;
 
        /* Don't run the guest on this vcpu */
        bool pause;
index 63e0ecc0490180e8b8b5e548b6b6a17b95b725e1..1bca8f8af4424154d69289bb32793a42ff223d30 100644 (file)
@@ -44,6 +44,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
        return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-                                            unsigned long size,
-                                            bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+                                              unsigned long size,
+                                              bool ipa_uncached)
 {
-       if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-               kvm_flush_dcache_to_poc((void *)hva, size);
-       
        /*
         * If we are going to insert an instruction page and the icache is
         * either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
         *
         * VIVT caches are tagged using both the ASID and the VMID and doesn't
         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+        *
+        * We need to do this through a kernel mapping (using the
+        * user-space mapping has proved to be the wrong
+        * solution). For that, we need to kmap one page at a time,
+        * and iterate over the range.
         */
-       if (icache_is_pipt()) {
-               __cpuc_coherent_user_range(hva, hva + size);
-       } else if (!icache_is_vivt_asid_tagged()) {
+
+       bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+       VM_BUG_ON(size & PAGE_MASK);
+
+       if (!need_flush && !icache_is_pipt())
+               goto vipt_cache;
+
+       while (size) {
+               void *va = kmap_atomic_pfn(pfn);
+
+               if (need_flush)
+                       kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+               if (icache_is_pipt())
+                       __cpuc_coherent_user_range((unsigned long)va,
+                                                  (unsigned long)va + PAGE_SIZE);
+
+               size -= PAGE_SIZE;
+               pfn++;
+
+               kunmap_atomic(va);
+       }
+
+vipt_cache:
+       if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+       void *va = kmap_atomic(pte_page(pte));
+
+       kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+       kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       unsigned long size = PMD_SIZE;
+       pfn_t pfn = pmd_pfn(pmd);
+
+       while (size) {
+               void *va = kmap_atomic_pfn(pfn);
+
+               kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+               pfn++;
+               size -= PAGE_SIZE;
+
+               kunmap_atomic(va);
+       }
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
 #define kvm_virt_to_phys(x)            virt_to_idmap((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* !__ASSEMBLY__ */
 
index 705bb7620673a10222e3258d94158ab8ec1555c9..0c3f5a0dafd32c04af58eec20e6af09f2efca0fe 100644 (file)
 #define __NR_getrandom                 (__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create              (__NR_SYSCALL_BASE+385)
 #define __NR_bpf                       (__NR_SYSCALL_BASE+386)
+#define __NR_execveat                  (__NR_SYSCALL_BASE+387)
 
 /*
  * The following SWIs are ARM private.
index e51833f8cc387118ae3826a0a78a533f4ff90a5f..05745eb838c599dc2dd6034a71b8ebec619b9995 100644 (file)
                CALL(sys_getrandom)
 /* 385 */      CALL(sys_memfd_create)
                CALL(sys_bpf)
+               CALL(sys_execveat)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 4176df721bf09bace95bad96d1c194e5b6b7a038..1a0045abead7562be1e27163e0aee3c6afbe9b40 100644 (file)
        .endm
 
        .macro  restore_user_regs, fast = 0, offset = 0
-       ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
-       ldr     lr, [sp, #\offset + S_PC]!      @ get pc
+       mov     r2, sp
+       ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
+       ldr     lr, [r2, #\offset + S_PC]!      @ get pc
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
-       strex   r1, r2, [sp]                    @ clear the exclusive monitor
+       strex   r1, r2, [r2]                    @ clear the exclusive monitor
 #endif
        .if     \fast
-       ldmdb   sp, {r1 - lr}^                  @ get calling r1 - lr
+       ldmdb   r2, {r1 - lr}^                  @ get calling r1 - lr
        .else
-       ldmdb   sp, {r0 - lr}^                  @ get calling r0 - lr
+       ldmdb   r2, {r0 - lr}^                  @ get calling r0 - lr
        .endif
        mov     r0, r0                          @ ARMv5T and earlier require a nop
                                                @ after ldm {}^
-       add     sp, sp, #S_FRAME_SIZE - S_PC
+       add     sp, sp, #\offset + S_FRAME_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
        .endm
 
index 2260f1855820fa2d2961025b2683c2e30982a8e0..8944f4991c3cfd4e76585cdc11b78e3c3b2257d2 100644 (file)
 
 __invalid_entry:
        v7m_exception_entry
+#ifdef CONFIG_PRINTK
        adr     r0, strerr
        mrs     r1, ipsr
        mov     r2, lr
        bl      printk
+#endif
        mov     r0, sp
        bl      show_regs
 1:     b       1b
index f7c65adaa428c9eabd2c5080ae097a374c2206f5..557e128e4df08ce711d4bab89952eae0f1d6d7ea 100644 (file)
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
                ret = 1;
        }
 
-       if (left > (s64)armpmu->max_period)
-               left = armpmu->max_period;
+       /*
+        * Limit the maximum period to prevent the counter value
+        * from overtaking the one we are about to program. In
+        * effect we are reducing max_period to account for
+        * interrupt latency (and we are being very conservative).
+        */
+       if (left > (armpmu->max_period >> 1))
+               left = armpmu->max_period >> 1;
 
        local64_set(&hwc->prev_count, (u64)-left);
 
index 6e4379c67cbc191e58fa28c4dbf25b10f887c536..592dda3f21fff05f7024abbcebbe2e55bc44947f 100644 (file)
@@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task)
 {
        return PERF_SAMPLE_REGS_ABI_32;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
index f9c863911038ac7d2cbdb5c3d154edd482df8d8a..e55408e965596964ff8c8708dcfec529a559b1bc 100644 (file)
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)
 
        /*
         * Ensure that start/size are aligned to a page boundary.
-        * Size is appropriately rounded down, start is rounded up.
+        * Size is rounded down, start is rounded up.
         */
-       size -= start & ~PAGE_MASK;
        aligned_start = PAGE_ALIGN(start);
+       if (aligned_start > start + size)
+               size = 0;
+       else
+               size -= aligned_start - start;
 
 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
        if (aligned_start > ULONG_MAX) {
@@ -1046,6 +1049,15 @@ static int c_show(struct seq_file *m, void *v)
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);
 
+#if defined(CONFIG_SMP)
+               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+                          per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
+                          (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
+#else
+               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+                          loops_per_jiffy / (500000/HZ),
+                          (loops_per_jiffy / (5000/HZ)) % 100);
+#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");
 
index 5e6052e18850a9d04071bbaf03f595fc8ecf00e0..86ef244c5a24b4fa80b20da26c5d522832a61d59 100644 (file)
@@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
+       int cpu;
+       unsigned long bogosum = 0;
+
+       for_each_online_cpu(cpu)
+               bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+       printk(KERN_INFO "SMP: Total of %d processors activated "
+              "(%lu.%02lu BogoMIPS).\n",
+              num_online_cpus(),
+              bogosum / (500000/HZ),
+              (bogosum / (5000/HZ)) % 100);
+
        hyp_mode_check();
 }
 
index 2d6d91001062f975981dd9beed2b43815f73c238..0b0d58a905c43ba05afc61ca06341bfab348b528 100644 (file)
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        vcpu->cpu = cpu;
        vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
-       /*
-        * Check whether this vcpu requires the cache to be flushed on
-        * this physical CPU. This is a consequence of doing dcache
-        * operations by set/way on this vcpu. We do it here to be in
-        * a non-preemptible section.
-        */
-       if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
-               flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
        kvm_arm_set_running_vcpu(vcpu);
 }
 
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
                vcpu->mode = OUTSIDE_GUEST_MODE;
-               vcpu->arch.last_pcpu = smp_processor_id();
                kvm_guest_exit();
                trace_kvm_exit(*vcpu_pc(vcpu));
                /*
index 7928dbdf210239a71f4f35e8ef289e9c2e8f0375..f3d88dc388bc560778d49dacfded70aebde44a89 100644 (file)
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
        return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
 {
-       unsigned long val;
-       int cpu;
-
        if (!p->is_write)
                return read_from_write_only(vcpu, p);
 
-       cpu = get_cpu();
-
-       cpumask_setall(&vcpu->arch.require_dcache_flush);
-       cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-       /* If we were already preempted, take the long way around */
-       if (cpu != vcpu->arch.last_pcpu) {
-               flush_cache_all();
-               goto done;
-       }
-
-       val = *vcpu_reg(vcpu, p->Rt1);
-
-       switch (p->CRm) {
-       case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-       case 14:                /* DCCISW */
-               asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-               break;
-
-       case 10:                /* DCCSW */
-               asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-               break;
-       }
-
-done:
-       put_cpu();
-
+       kvm_set_way_flush(vcpu);
        return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set.  If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
-                         const struct coproc_params *p,
-                         const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+                  const struct coproc_params *p,
+                  const struct coproc_reg *r)
 {
+       bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
        BUG_ON(!p->is_write);
 
        vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
        if (p->is_64bit)
                vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-       return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-                 const struct coproc_params *p,
-                 const struct coproc_reg *r)
-{
-       access_vm_reg(vcpu, p, r);
-
-       if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
-               vcpu->arch.hcr &= ~HCR_TVM;
-               stage2_flush_vm(vcpu->kvm);
-       }
-
+       kvm_toggle_cache(vcpu, was_enabled);
        return true;
 }
 
index 1a44bbe39643f519ec986d43dcd3e416881d13a9..88d24a3a977812b512e8e0c03144fec796727f8b 100644 (file)
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define is64           .is_64 = true
 #define is32           .is_64 = false
 
-bool access_sctlr(struct kvm_vcpu *vcpu,
-                 const struct coproc_params *p,
-                 const struct coproc_reg *r);
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+                  const struct coproc_params *p,
+                  const struct coproc_reg *r);
 
 #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
index e6f4ae48bda968f8cac7caf6c94ffd1413631436..a7136757d3731534fd169e06938c74b85803cdfc 100644 (file)
@@ -34,7 +34,7 @@
 static const struct coproc_reg a15_regs[] = {
        /* SCTLR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-                       access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+                       access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
 };
 
 static struct kvm_coproc_target_table a15_target_table = {
index 17fc7cd479d3e75d322c207ffa35d42f8785d7ee..b19e46d1b2c08187cc70d4e86742ea4a470ec83f 100644 (file)
@@ -37,7 +37,7 @@
 static const struct coproc_reg a7_regs[] = {
        /* SCTLR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-                       access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+                       access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
 };
 
 static struct kvm_coproc_target_table a7_target_table = {
index 1dc9778a00af358431bbed021ba2d5244642debb..136662547ca6298f0fd5b6f2c73c7aea557ff472 100644 (file)
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+       __kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       __kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+       __kvm_flush_dcache_pud(pud);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
 {
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
        put_page(virt_to_page(pmd));
 }
 
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM.  However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
                       phys_addr_t addr, phys_addr_t end)
 {
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
        start_pte = pte = pte_offset_kernel(pmd, addr);
        do {
                if (!pte_none(*pte)) {
+                       pte_t old_pte = *pte;
+
                        kvm_set_pte(pte, __pte(0));
-                       put_page(virt_to_page(pte));
                        kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                       /* No need to invalidate the cache for device mappings */
+                       if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+                               kvm_flush_dcache_pte(old_pte);
+
+                       put_page(virt_to_page(pte));
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
                next = kvm_pmd_addr_end(addr, end);
                if (!pmd_none(*pmd)) {
                        if (kvm_pmd_huge(*pmd)) {
+                               pmd_t old_pmd = *pmd;
+
                                pmd_clear(pmd);
                                kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                               kvm_flush_dcache_pmd(old_pmd);
+
                                put_page(virt_to_page(pmd));
                        } else {
                                unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
                next = kvm_pud_addr_end(addr, end);
                if (!pud_none(*pud)) {
                        if (pud_huge(*pud)) {
+                               pud_t old_pud = *pud;
+
                                pud_clear(pud);
                                kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                               kvm_flush_dcache_pud(old_pud);
+
                                put_page(virt_to_page(pud));
                        } else {
                                unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               if (!pte_none(*pte)) {
-                       hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-                       kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
-               }
+               if (!pte_none(*pte) &&
+                   (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+                       kvm_flush_dcache_pte(*pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
        do {
                next = kvm_pmd_addr_end(addr, end);
                if (!pmd_none(*pmd)) {
-                       if (kvm_pmd_huge(*pmd)) {
-                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-                               kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
-                       } else {
+                       if (kvm_pmd_huge(*pmd))
+                               kvm_flush_dcache_pmd(*pmd);
+                       else
                                stage2_flush_ptes(kvm, pmd, addr, next);
-                       }
                }
        } while (pmd++, addr = next, addr != end);
 }
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
        do {
                next = kvm_pud_addr_end(addr, end);
                if (!pud_none(*pud)) {
-                       if (pud_huge(*pud)) {
-                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-                               kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
-                       } else {
+                       if (pud_huge(*pud))
+                               kvm_flush_dcache_pud(*pud);
+                       else
                                stage2_flush_pmds(kvm, pud, addr, next);
-                       }
                }
        } while (pud++, addr = next, addr != end);
 }
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
  * Go through the stage 2 page tables and invalidate any cache lines
  * backing memory already mapped to the VM.
  */
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
        return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+                                     unsigned long size, bool uncached)
+{
+       __coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pmd_writable(&new_pmd);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-                                         fault_ipa_uncached);
+               coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        } else {
                pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pte_writable(&new_pte);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-                                         fault_ipa_uncached);
+               coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
                        pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
        }
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
        unmap_stage2_range(kvm, gpa, size);
        spin_unlock(&kvm->mmu_lock);
 }
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ *   caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches on both caches being turned on and off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+       unsigned long hcr = vcpu_get_hcr(vcpu);
+
+       /*
+        * If this is the first time we do a S/W operation
+        * (i.e. HCR_TVM not set) flush the whole memory, and set the
+        * VM trapping.
+        *
+        * Otherwise, rely on the VM trapping to wait for the MMU +
+        * Caches to be turned off. At that point, we'll be able to
+        * clean the caches again.
+        */
+       if (!(hcr & HCR_TVM)) {
+               trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+                                       vcpu_has_cache_enabled(vcpu));
+               stage2_flush_vm(vcpu->kvm);
+               vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+       }
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+       bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+       /*
+        * If switching the MMU+caches on, need to invalidate the caches.
+        * If switching it off, need to clean the caches.
+        * Clean + invalidate does the trick always.
+        */
+       if (now_enabled != was_enabled)
+               stage2_flush_vm(vcpu->kvm);
+
+       /* Caches are now on, stop trapping VM ops (until a S/W op) */
+       if (now_enabled)
+               vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+       trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
index b1d640f78623971337ad08072efdcc981c124c0f..b6a6e71022010bf09ae9b52aad981e745c67ef74 100644 (file)
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
                  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_set_way_flush,
+           TP_PROTO(unsigned long vcpu_pc, bool cache),
+           TP_ARGS(vcpu_pc, cache),
+
+           TP_STRUCT__entry(
+                   __field(    unsigned long,  vcpu_pc         )
+                   __field(    bool,           cache           )
+           ),
+
+           TP_fast_assign(
+                   __entry->vcpu_pc            = vcpu_pc;
+                   __entry->cache              = cache;
+           ),
+
+           TP_printk("S/W flush at 0x%016lx (cache %s)",
+                     __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+           TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+           TP_ARGS(vcpu_pc, was, now),
+
+           TP_STRUCT__entry(
+                   __field(    unsigned long,  vcpu_pc         )
+                   __field(    bool,           was             )
+                   __field(    bool,           now             )
+           ),
+
+           TP_fast_assign(
+                   __entry->vcpu_pc            = vcpu_pc;
+                   __entry->was                = was;
+                   __entry->now                = now;
+           ),
+
+           TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+                     __entry->vcpu_pc, __entry->was ? "on" : "off",
+                     __entry->now ? "on" : "off")
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
index 8fb9ef5333f17648d8a28aaa0385badc9c0ed65a..97f7367d32b8a2b9af60abb3f6b4df68dd860489 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/of_platform.h>
 #include <linux/phy.h>
 #include <linux/clk-provider.h>
+#include <linux/phy.h>
 
 #include <asm/setup.h>
 #include <asm/irq.h>
 
 #include "generic.h"
 
+static int ksz8081_phy_fixup(struct phy_device *phy)
+{
+       int value;
+
+       value = phy_read(phy, 0x16);
+       value &= ~0x20;
+       phy_write(phy, 0x16, value);
+
+       return 0;
+}
+
 static void __init sama5_dt_device_init(void)
 {
+       if (of_machine_is_compatible("atmel,sama5d4ek") &&
+          IS_ENABLED(CONFIG_PHYLIB)) {
+               phy_register_fixup_for_id("fc028000.etherne:00",
+                                               ksz8081_phy_fixup);
+       }
+
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
index 5951660d1bd2363db0326cd83b07fcec7cc908f1..2daef619d0534d626daef9ac2bfd8fcacbb91a19 100644 (file)
@@ -144,7 +144,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                post_div_table[1].div = 1;
                post_div_table[2].div = 1;
                video_div_table[1].div = 1;
-               video_div_table[2].div = 1;
+               video_div_table[3].div = 1;
        }
 
        clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
index 17354a11356fbd0291ca5629db26446370c951d2..5a3e5a159e708b35a13427b6c766d48bc8cf15b0 100644 (file)
@@ -558,6 +558,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
        clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
        clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
 
+       clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+       clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+
        /* Set initial power mode */
        imx6q_set_lpm(WAIT_CLOCKED);
 }
index 2f7616889c3f6872daf8ae4e68e79918efb8f4a5..5057d61298b71ace2c14c8dd0eb271f0506a0964 100644 (file)
@@ -31,8 +31,6 @@
 #include <linux/micrel_phy.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
-#include <linux/fec.h>
-#include <linux/netdevice.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/system_misc.h>
 #include "cpuidle.h"
 #include "hardware.h"
 
-static struct fec_platform_data fec_pdata;
-
-static void imx6q_fec_sleep_enable(int enabled)
-{
-       struct regmap *gpr;
-
-       gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
-       if (!IS_ERR(gpr)) {
-               if (enabled)
-                       regmap_update_bits(gpr, IOMUXC_GPR13,
-                                          IMX6Q_GPR13_ENET_STOP_REQ,
-                                          IMX6Q_GPR13_ENET_STOP_REQ);
-
-               else
-                       regmap_update_bits(gpr, IOMUXC_GPR13,
-                                          IMX6Q_GPR13_ENET_STOP_REQ, 0);
-       } else
-               pr_err("failed to find fsl,imx6q-iomux-gpr regmap\n");
-}
-
-static void __init imx6q_enet_plt_init(void)
-{
-       struct device_node *np;
-
-       np = of_find_node_by_path("/soc/aips-bus@02100000/ethernet@02188000");
-       if (np && of_get_property(np, "fsl,magic-packet", NULL))
-               fec_pdata.sleep_mode_enable = imx6q_fec_sleep_enable;
-}
-
 /* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
 static int ksz9021rn_phy_fixup(struct phy_device *phydev)
 {
@@ -292,12 +261,6 @@ static void __init imx6q_axi_init(void)
        }
 }
 
-/* Add auxdata to pass platform data */
-static const struct of_dev_auxdata imx6q_auxdata_lookup[] __initconst = {
-       OF_DEV_AUXDATA("fsl,imx6q-fec", 0x02188000, NULL, &fec_pdata),
-       { /* sentinel */ }
-};
-
 static void __init imx6q_init_machine(void)
 {
        struct device *parent;
@@ -311,13 +274,11 @@ static void __init imx6q_init_machine(void)
 
        imx6q_enet_phy_init();
 
-       of_platform_populate(NULL, of_default_bus_match_table,
-                            imx6q_auxdata_lookup, parent);
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
 
        imx_anatop_init();
        cpu_is_imx6q() ?  imx6q_pm_init() : imx6dl_pm_init();
        imx6q_1588_init();
-       imx6q_enet_plt_init();
        imx6q_axi_init();
 }
 
index 747b012665f5589a07fddb28f7bfe8f95c49994e..7a96c65772344f20e385993b6dd17057bd2af658 100644 (file)
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
-#include <linux/fec.h>
-#include <linux/netdevice.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
 #include "common.h"
 #include "cpuidle.h"
 
-static struct fec_platform_data fec_pdata[2];
-
-static void imx6sx_fec1_sleep_enable(int enabled)
-{
-       struct regmap *gpr;
-
-       gpr = syscon_regmap_lookup_by_compatible("fsl,imx6sx-iomuxc-gpr");
-       if (!IS_ERR(gpr)) {
-               if (enabled)
-                       regmap_update_bits(gpr, IOMUXC_GPR4,
-                                          IMX6SX_GPR4_FEC_ENET1_STOP_REQ,
-                                          IMX6SX_GPR4_FEC_ENET1_STOP_REQ);
-               else
-                       regmap_update_bits(gpr, IOMUXC_GPR4,
-                                          IMX6SX_GPR4_FEC_ENET1_STOP_REQ, 0);
-       } else
-               pr_err("failed to find fsl,imx6sx-iomux-gpr regmap\n");
-}
-
-static void imx6sx_fec2_sleep_enable(int enabled)
-{
-       struct regmap *gpr;
-
-       gpr = syscon_regmap_lookup_by_compatible("fsl,imx6sx-iomuxc-gpr");
-       if (!IS_ERR(gpr)) {
-               if (enabled)
-                       regmap_update_bits(gpr, IOMUXC_GPR4,
-                                          IMX6SX_GPR4_FEC_ENET2_STOP_REQ,
-                                          IMX6SX_GPR4_FEC_ENET2_STOP_REQ);
-               else
-                       regmap_update_bits(gpr, IOMUXC_GPR4,
-                                          IMX6SX_GPR4_FEC_ENET2_STOP_REQ, 0);
-       } else
-               pr_err("failed to find fsl,imx6sx-iomux-gpr regmap\n");
-}
-
-static void __init imx6sx_enet_plt_init(void)
-{
-       struct device_node *np;
-
-       np = of_find_node_by_path("/soc/aips-bus@02100000/ethernet@02188000");
-       if (np && of_get_property(np, "fsl,magic-packet", NULL))
-               fec_pdata[0].sleep_mode_enable = imx6sx_fec1_sleep_enable;
-       np = of_find_node_by_path("/soc/aips-bus@02100000/ethernet@021b4000");
-       if (np && of_get_property(np, "fsl,magic-packet", NULL))
-               fec_pdata[1].sleep_mode_enable = imx6sx_fec2_sleep_enable;
-}
-
 static int ar8031_phy_fixup(struct phy_device *dev)
 {
        u16 val;
index 3585cb394e9b952388e4f26bfb5e950bfd0ad9eb..ccef8806bb58771b16a87dc80edc32e585597d29 100644 (file)
@@ -189,6 +189,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
        coherency_cpu_base = of_iomap(np, 0);
        arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
 
+       /*
+        * We should switch the PL310 to I/O coherency mode only if
+        * I/O coherency is actually enabled.
+        */
+       if (!coherency_available())
+               return;
+
        /*
         * Add the PL310 property "arm,io-coherent". This makes sure the
         * outer sync operation is not used, which allows to
@@ -246,9 +253,14 @@ static int coherency_type(void)
        return type;
 }
 
+/*
+ * As a precaution, we currently completely disable hardware I/O
+ * coherency, until enough testing is done with automatic I/O
+ * synchronization barriers to validate that it is a proper solution.
+ */
 int coherency_available(void)
 {
-       return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+       return false;
 }
 
 int __init coherency_init(void)
index 608079a1aba6774e5ff6682354ecc08ba2d7a882..b61c049f92d6a361de57b16fe11c4773c12b88c6 100644 (file)
@@ -77,6 +77,24 @@ MACHINE_END
 #endif
 
 #ifdef CONFIG_ARCH_OMAP3
+/* Some boards need board name for legacy userspace in /proc/cpuinfo */
+static const char *const n900_boards_compat[] __initconst = {
+       "nokia,omap3-n900",
+       NULL,
+};
+
+DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
+       .reserve        = omap_reserve,
+       .map_io         = omap3_map_io,
+       .init_early     = omap3430_init_early,
+       .init_machine   = omap_generic_init,
+       .init_late      = omap3_init_late,
+       .init_time      = omap3_sync32k_timer_init,
+       .dt_compat      = n900_boards_compat,
+       .restart        = omap3xxx_restart,
+MACHINE_END
+
+/* Generic omap3 boards, most boards can use these */
 static const char *const omap3_boards_compat[] __initconst = {
        "ti,omap3430",
        "ti,omap3",
index 377eea849e7bcdaf1142f6b1087ed6857a92b046..64e44d6d07c0c89ba5aa4fc3114111b32b261857 100644 (file)
@@ -211,6 +211,7 @@ extern struct device *omap2_get_iva_device(void);
 extern struct device *omap2_get_l3_device(void);
 extern struct device *omap4_get_dsp_device(void);
 
+unsigned int omap4_xlate_irq(unsigned int hwirq);
 void omap_gic_of_init(void);
 
 #ifdef CONFIG_CACHE_L2X0
@@ -249,6 +250,7 @@ extern void omap4_cpu_die(unsigned int cpu);
 extern struct smp_operations omap4_smp_ops;
 
 extern void omap5_secondary_startup(void);
+extern void omap5_secondary_hyp_startup(void);
 #endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PM)
index a3c013345c45fa3b495924edaaa8628e636e147d..a80ac2d70bb1bca42084187851c09b914cd51871 100644 (file)
 #define OMAP5XXX_CONTROL_STATUS                0x134
 #define OMAP5_DEVICETYPE_MASK          (0x7 << 6)
 
+/* DRA7XX CONTROL CORE BOOTSTRAP */
+#define DRA7_CTRL_CORE_BOOTSTRAP       0x6c4
+#define DRA7_SPEEDSELECT_MASK          (0x3 << 8)
+
 /*
  * REVISIT: This list of registers is not comprehensive - there are more
  * that should be added.
index 4993d4bfe9b2a579d7adcc37726cb6828a78f130..6d1dffca6c7b6d68f3bd6e29a16bf5334c54a5fb 100644 (file)
@@ -22,6 +22,7 @@
 
 /* Physical address needed since MMU not enabled yet on secondary core */
 #define AUX_CORE_BOOT0_PA                      0x48281800
+#define API_HYP_ENTRY                          0x102
 
 /*
  * OMAP5 specific entry point for secondary CPU to jump from ROM
@@ -40,6 +41,26 @@ wait:        ldr     r2, =AUX_CORE_BOOT0_PA  @ read from AuxCoreBoot0
        bne     wait
        b       secondary_startup
 ENDPROC(omap5_secondary_startup)
+/*
+ * Same as omap5_secondary_startup except we call into the ROM to
+ * enable HYP mode first.  This is called instead of
+ * omap5_secondary_startup if the primary CPU was put into HYP mode by
+ * the boot loader.
+ */
+ENTRY(omap5_secondary_hyp_startup)
+wait_2:        ldr     r2, =AUX_CORE_BOOT0_PA  @ read from AuxCoreBoot0
+       ldr     r0, [r2]
+       mov     r0, r0, lsr #5
+       mrc     p15, 0, r4, c0, c0, 5
+       and     r4, r4, #0x0f
+       cmp     r0, r4
+       bne     wait_2
+       ldr     r12, =API_HYP_ENTRY
+       adr     r0, hyp_boot
+       smc     #0
+hyp_boot:
+       b       secondary_startup
+ENDPROC(omap5_secondary_hyp_startup)
 /*
  * OMAP4 specific entry point for secondary CPU to jump from ROM
  * code.  This routine also provides a holding flag into which
index 256e84ef0f679072324892a04d90817b81ceee36..5305ec7341eca5579398a10b72f263a2fbbe8e0e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/irqchip/arm-gic.h>
 
 #include <asm/smp_scu.h>
+#include <asm/virt.h>
 
 #include "omap-secure.h"
 #include "omap-wakeupgen.h"
@@ -227,8 +228,16 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
        if (omap_secure_apis_support())
                omap_auxcoreboot_addr(virt_to_phys(startup_addr));
        else
-               writel_relaxed(virt_to_phys(omap5_secondary_startup),
-                              base + OMAP_AUX_CORE_BOOT_1);
+               /*
+                * If the boot CPU is in HYP mode then start secondary
+                * CPU in HYP mode as well.
+                */
+               if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
+                       writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup),
+                                      base + OMAP_AUX_CORE_BOOT_1);
+               else
+                       writel_relaxed(virt_to_phys(omap5_secondary_startup),
+                                      base + OMAP_AUX_CORE_BOOT_1);
 
 }
 
index b7cb44abe49b35a7a03c3c9f172eca5daeb7b4a7..cc30e49a4cc278d08a44895fbf28797a94b6cfbd 100644 (file)
@@ -256,6 +256,38 @@ static int __init omap4_sar_ram_init(void)
 }
 omap_early_initcall(omap4_sar_ram_init);
 
+static struct of_device_id gic_match[] = {
+       { .compatible = "arm,cortex-a9-gic", },
+       { .compatible = "arm,cortex-a15-gic", },
+       { },
+};
+
+static struct device_node *gic_node;
+
+unsigned int omap4_xlate_irq(unsigned int hwirq)
+{
+       struct of_phandle_args irq_data;
+       unsigned int irq;
+
+       if (!gic_node)
+               gic_node = of_find_matching_node(NULL, gic_match);
+
+       if (WARN_ON(!gic_node))
+               return hwirq;
+
+       irq_data.np = gic_node;
+       irq_data.args_count = 3;
+       irq_data.args[0] = 0;
+       irq_data.args[1] = hwirq - OMAP44XX_IRQ_GIC_START;
+       irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+       irq = irq_create_of_mapping(&irq_data);
+       if (WARN_ON(!irq))
+               irq = hwirq;
+
+       return irq;
+}
+
 void __init omap_gic_of_init(void)
 {
        struct device_node *np;
index cbb908dc5cf0e09bec45ce7fbf7814936883d9fc..9025ffffd2dc1d066fcb54a2cf44f2bf9a73525c 100644 (file)
@@ -3534,9 +3534,15 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
 
        mpu_irqs_cnt = _count_mpu_irqs(oh);
        for (i = 0; i < mpu_irqs_cnt; i++) {
+               unsigned int irq;
+
+               if (oh->xlate_irq)
+                       irq = oh->xlate_irq((oh->mpu_irqs + i)->irq);
+               else
+                       irq = (oh->mpu_irqs + i)->irq;
                (res + r)->name = (oh->mpu_irqs + i)->name;
-               (res + r)->start = (oh->mpu_irqs + i)->irq;
-               (res + r)->end = (oh->mpu_irqs + i)->irq;
+               (res + r)->start = irq;
+               (res + r)->end = irq;
                (res + r)->flags = IORESOURCE_IRQ;
                r++;
        }
index 35ca6efbec31eb533ce039761024a7260371b2a1..5b42fafcaf55102fc5631b4095b920353c4329c8 100644 (file)
@@ -676,6 +676,7 @@ struct omap_hwmod {
        spinlock_t                      _lock;
        struct list_head                node;
        struct omap_hwmod_ocp_if        *_mpu_port;
+       unsigned int                    (*xlate_irq)(unsigned int);
        u16                             flags;
        u8                              mpu_rt_idx;
        u8                              response_lat;
index c314b3c31117e8cbee248db6aec7c0feb64f4fa9..f5e68a7820251360dc1aad459e259ee1c6d217ae 100644 (file)
@@ -479,6 +479,7 @@ static struct omap_hwmod omap44xx_dma_system_hwmod = {
        .class          = &omap44xx_dma_hwmod_class,
        .clkdm_name     = "l3_dma_clkdm",
        .mpu_irqs       = omap44xx_dma_system_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .main_clk       = "l3_div_ck",
        .prcm = {
                .omap4 = {
@@ -640,6 +641,7 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
        .class          = &omap44xx_dispc_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
        .mpu_irqs       = omap44xx_dss_dispc_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .sdma_reqs      = omap44xx_dss_dispc_sdma_reqs,
        .main_clk       = "dss_dss_clk",
        .prcm = {
@@ -693,6 +695,7 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
        .class          = &omap44xx_dsi_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
        .mpu_irqs       = omap44xx_dss_dsi1_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .sdma_reqs      = omap44xx_dss_dsi1_sdma_reqs,
        .main_clk       = "dss_dss_clk",
        .prcm = {
@@ -726,6 +729,7 @@ static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
        .class          = &omap44xx_dsi_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
        .mpu_irqs       = omap44xx_dss_dsi2_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .sdma_reqs      = omap44xx_dss_dsi2_sdma_reqs,
        .main_clk       = "dss_dss_clk",
        .prcm = {
@@ -784,6 +788,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
         */
        .flags          = HWMOD_SWSUP_SIDLE,
        .mpu_irqs       = omap44xx_dss_hdmi_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .sdma_reqs      = omap44xx_dss_hdmi_sdma_reqs,
        .main_clk       = "dss_48mhz_clk",
        .prcm = {
index 3e9523084b2ace3005adbd18ceae347eaef3e66a..7c3fac035e936884febd606bcb9d0218428fd91c 100644 (file)
@@ -288,6 +288,7 @@ static struct omap_hwmod omap54xx_dma_system_hwmod = {
        .class          = &omap54xx_dma_hwmod_class,
        .clkdm_name     = "dma_clkdm",
        .mpu_irqs       = omap54xx_dma_system_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .main_clk       = "l3_iclk_div",
        .prcm = {
                .omap4 = {
index a8e4b582c527476972de36917c144570dd3665b4..6163d66102a3561890240487a592964874cb260c 100644 (file)
@@ -498,6 +498,7 @@ struct omap_prcm_irq_setup {
        u8 nr_irqs;
        const struct omap_prcm_irq *irqs;
        int irq;
+       unsigned int (*xlate_irq)(unsigned int);
        void (*read_pending_irqs)(unsigned long *events);
        void (*ocp_barrier)(void);
        void (*save_and_clear_irqen)(u32 *saved_mask);
index cc170fb81ff76dc018ad6eee2c0e9931ab575f08..408c64efb80700868fa4c8b0138a2763a78bc161 100644 (file)
@@ -49,6 +49,7 @@ static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
        .irqs                   = omap4_prcm_irqs,
        .nr_irqs                = ARRAY_SIZE(omap4_prcm_irqs),
        .irq                    = 11 + OMAP44XX_IRQ_GIC_START,
+       .xlate_irq              = omap4_xlate_irq,
        .read_pending_irqs      = &omap44xx_prm_read_pending_irqs,
        .ocp_barrier            = &omap44xx_prm_ocp_barrier,
        .save_and_clear_irqen   = &omap44xx_prm_save_and_clear_irqen,
@@ -751,8 +752,10 @@ static int omap44xx_prm_late_init(void)
                }
 
                /* Once OMAP4 DT is filled as well */
-               if (irq_num >= 0)
+               if (irq_num >= 0) {
                        omap4_prcm_irq_setup.irq = irq_num;
+                       omap4_prcm_irq_setup.xlate_irq = NULL;
+               }
        }
 
        omap44xx_prm_enable_io_wakeup();
index 779940cb6e5651d4d5c486878b5cbbef060af1dc..dea2833ca627c84ca67db08d77c24f834c3a0340 100644 (file)
@@ -187,6 +187,7 @@ int omap_prcm_event_to_irq(const char *name)
  */
 void omap_prcm_irq_cleanup(void)
 {
+       unsigned int irq;
        int i;
 
        if (!prcm_irq_setup) {
@@ -211,7 +212,11 @@ void omap_prcm_irq_cleanup(void)
        kfree(prcm_irq_setup->priority_mask);
        prcm_irq_setup->priority_mask = NULL;
 
-       irq_set_chained_handler(prcm_irq_setup->irq, NULL);
+       if (prcm_irq_setup->xlate_irq)
+               irq = prcm_irq_setup->xlate_irq(prcm_irq_setup->irq);
+       else
+               irq = prcm_irq_setup->irq;
+       irq_set_chained_handler(irq, NULL);
 
        if (prcm_irq_setup->base_irq > 0)
                irq_free_descs(prcm_irq_setup->base_irq,
@@ -259,6 +264,7 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
        int offset, i;
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
+       unsigned int irq;
 
        if (!irq_setup)
                return -EINVAL;
@@ -298,7 +304,11 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
                                1 << (offset & 0x1f);
        }
 
-       irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
+       if (irq_setup->xlate_irq)
+               irq = irq_setup->xlate_irq(irq_setup->irq);
+       else
+               irq = irq_setup->irq;
+       irq_set_chained_handler(irq, omap_prcm_irq_handler);
 
        irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
                0);
index 4f61148ec1689b667f30a5259aa98037c5fa06ec..7d45c84c69ba38a3da362456c941bb39ef90d7a2 100644 (file)
@@ -54,6 +54,7 @@
 
 #include "soc.h"
 #include "common.h"
+#include "control.h"
 #include "powerdomain.h"
 #include "omap-secure.h"
 
@@ -496,7 +497,8 @@ static void __init realtime_counter_init(void)
        void __iomem *base;
        static struct clk *sys_clk;
        unsigned long rate;
-       unsigned int reg, num, den;
+       unsigned int reg;
+       unsigned long long num, den;
 
        base = ioremap(REALTIME_COUNTER_BASE, SZ_32);
        if (!base) {
@@ -511,13 +513,42 @@ static void __init realtime_counter_init(void)
        }
 
        rate = clk_get_rate(sys_clk);
+
+       if (soc_is_dra7xx()) {
+               /*
+                * Errata i856 says the 32.768KHz crystal does not start at
+                * power on, so the CPU falls back to an emulated 32KHz clock
+                * based on sysclk / 610 instead. This causes the master counter
+                * frequency to not be 6.144MHz but at sysclk / 610 * 375 / 2
+                * (OR sysclk * 75 / 244)
+                *
+                * This affects at least the DRA7/AM572x 1.0, 1.1 revisions.
+                * Of course any board built without a populated 32.768KHz
+                * crystal would also need this fix even if the CPU is fixed
+                * later.
+                *
+                * Either case can be detected by using the two speedselect bits
+                * If they are not 0, then the 32.768KHz clock driving the
+                * coarse counter that corrects the fine counter every time it
+                * ticks is actually rate/610 rather than 32.768KHz and we
+                * should compensate to avoid the 570ppm (at 20MHz, much worse
+                * at other rates) too fast system time.
+                */
+               reg = omap_ctrl_readl(DRA7_CTRL_CORE_BOOTSTRAP);
+               if (reg & DRA7_SPEEDSELECT_MASK) {
+                       num = 75;
+                       den = 244;
+                       goto sysclk1_based;
+               }
+       }
+
        /* Numerator/denumerator values refer TRM Realtime Counter section */
        switch (rate) {
-       case 1200000:
+       case 12000000:
                num = 64;
                den = 125;
                break;
-       case 1300000:
+       case 13000000:
                num = 768;
                den = 1625;
                break;
@@ -529,11 +560,11 @@ static void __init realtime_counter_init(void)
                num = 192;
                den = 625;
                break;
-       case 2600000:
+       case 26000000:
                num = 384;
                den = 1625;
                break;
-       case 2700000:
+       case 27000000:
                num = 256;
                den = 1125;
                break;
@@ -545,6 +576,7 @@ static void __init realtime_counter_init(void)
                break;
        }
 
+sysclk1_based:
        /* Program numerator and denumerator registers */
        reg = readl_relaxed(base + INCREMENTER_NUMERATOR_OFFSET) &
                        NUMERATOR_DENUMERATOR_MASK;
@@ -556,7 +588,7 @@ static void __init realtime_counter_init(void)
        reg |= den;
        writel_relaxed(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);
 
-       arch_timer_freq = (rate / den) * num;
+       arch_timer_freq = DIV_ROUND_UP_ULL(rate * num, den);
        set_cntfreq();
 
        iounmap(base);
index 4457e731f7a4f0029cb3fe4e3811d5396097d2f2..292eca0e78ed07e3f7358c99671f8573b16aaecf 100644 (file)
@@ -66,19 +66,24 @@ void __init omap_pmic_init(int bus, u32 clkrate,
        omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
 }
 
+#ifdef CONFIG_ARCH_OMAP4
 void __init omap4_pmic_init(const char *pmic_type,
                    struct twl4030_platform_data *pmic_data,
                    struct i2c_board_info *devices, int nr_devices)
 {
        /* PMIC part*/
+       unsigned int irq;
+
        omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
        omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
-       omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
+       irq = omap4_xlate_irq(7 + OMAP44XX_IRQ_GIC_START);
+       omap_pmic_init(1, 400, pmic_type, irq, pmic_data);
 
        /* Register additional devices on i2c1 bus if needed */
        if (devices)
                i2c_register_board_info(1, devices, nr_devices);
 }
+#endif
 
 void __init omap_pmic_late_init(void)
 {
index d226b71d21d5c6c0bdb702af93323f59934b22d4..a611f48525828fcef5cb1dbacc9d2430f5de8867 100644 (file)
 #include <linux/init.h>
 #include <linux/of_platform.h>
 #include <linux/irqchip.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/hardware/cache-l2x0.h>
 #include "core.h"
 
+#define RK3288_GRF_SOC_CON0 0x244
+
+static void __init rockchip_timer_init(void)
+{
+       if (of_machine_is_compatible("rockchip,rk3288")) {
+               struct regmap *grf;
+
+               /*
+                * Disable auto jtag/sdmmc switching that causes issues
+                * with the mmc controllers making them unreliable
+                */
+               grf = syscon_regmap_lookup_by_compatible("rockchip,rk3288-grf");
+               if (!IS_ERR(grf))
+                       regmap_write(grf, RK3288_GRF_SOC_CON0, 0x10000000);
+               else
+                       pr_err("rockchip: could not get grf syscon\n");
+       }
+
+       of_clk_init(NULL);
+       clocksource_of_init();
+}
+
 static void __init rockchip_dt_init(void)
 {
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
@@ -42,6 +68,7 @@ static const char * const rockchip_board_dt_compat[] = {
 DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)")
        .l2c_aux_val    = 0,
        .l2c_aux_mask   = ~0,
+       .init_time      = rockchip_timer_init,
        .dt_compat      = rockchip_board_dt_compat,
        .init_machine   = rockchip_dt_init,
 MACHINE_END
index 66f67816a844623977a4595ef23642ed381b3549..444f22d370f0ef7a4e35957abfc3d458e3d7e49e 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
                                      sizeof(ape6evm_leds_pdata));
 }
 
+static void __init ape6evm_legacy_init_time(void)
+{
+       /* Do not invoke DT-based timers via clocksource_of_init() */
+}
+
+static void __init ape6evm_legacy_init_irq(void)
+{
+       void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+       /* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
+
 static const char *ape6evm_boards_compat_dt[] __initdata = {
        "renesas,ape6evm",
        NULL,
@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
 
 DT_MACHINE_START(APE6EVM_DT, "ape6evm")
        .init_early     = shmobile_init_delay,
+       .init_irq       = ape6evm_legacy_init_irq,
        .init_machine   = ape6evm_add_standard_devices,
        .init_late      = shmobile_init_late,
        .dt_compat      = ape6evm_boards_compat_dt,
+       .init_time      = ape6evm_legacy_init_time,
 MACHINE_END
index f8197eb6e5669ada1b8ddd57e7bb09e42bedfe62..65b128dd4072b8070fb1bb65edd97b7abb4c3d5e 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/leds.h>
 #include <linux/mfd/tmio.h>
@@ -811,6 +813,16 @@ static void __init lager_init(void)
                                          lager_ksz8041_fixup);
 }
 
+static void __init lager_legacy_init_irq(void)
+{
+       void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+       /* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
 static const char * const lager_boards_compat_dt[] __initconst = {
        "renesas,lager",
        NULL,
@@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = {
 DT_MACHINE_START(LAGER_DT, "lager")
        .smp            = smp_ops(r8a7790_smp_ops),
        .init_early     = shmobile_init_delay,
+       .init_irq       = lager_legacy_init_irq,
        .init_time      = rcar_gen2_timer_init,
        .init_machine   = lager_init,
        .init_late      = shmobile_init_late,
index 79ad93dfdae4ee7083f2c2f1351ee863355d20c4..d191cf4197313482b961f1a6ed91f11954eff9a4 100644 (file)
@@ -800,7 +800,14 @@ void __init r8a7740_init_irq_of(void)
        void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
        void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000);
+
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
        irqchip_init();
+#endif
 
        /* route signals to GIC */
        iowrite32(0x0, pfc_inta_ctrl);
index 170bd146ba1796b801f27e0ca74e2cfd79f0c7e8..cef8895a9b8271dcd27549b7a5f6209cc4cb9abb 100644 (file)
@@ -576,11 +576,18 @@ void __init r8a7778_init_irq_extpin(int irlm)
 void __init r8a7778_init_irq_dt(void)
 {
        void __iomem *base = ioremap_nocache(0xfe700000, 0x00100000);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       void __iomem *gic_dist_base = ioremap_nocache(0xfe438000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xfe430000, 0x1000);
+#endif
 
        BUG_ON(!base);
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
        irqchip_init();
-
+#endif
        /* route all interrupts to ARM */
        __raw_writel(0x73ffffff, base + INT2NTSR0);
        __raw_writel(0xffffffff, base + INT2NTSR1);
index 6156d172cf3108d79c44a397931037a53f1c5266..27dceaf9e688c174910004fc80598f4beb130075 100644 (file)
@@ -720,10 +720,17 @@ static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
 
 void __init r8a7779_init_irq_dt(void)
 {
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       void __iomem *gic_dist_base = ioremap_nocache(0xf0001000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xf0000100, 0x1000);
+#endif
        gic_arch_extn.irq_set_wake = r8a7779_set_wake;
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
        irqchip_init();
-
+#endif
        /* route all interrupts to ARM */
        __raw_writel(0xffffffff, INT2NTSR0);
        __raw_writel(0x3fffffff, INT2NTSR1);
index 3dd6edd9bd1d3dfe5b6bb7787e4d672c1a6f63be..cc9470dfb1cee51eb50d0597603c6a195d67af5f 100644 (file)
@@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void)
 #ifdef CONFIG_COMMON_CLK
        rcar_gen2_clocks_init(mode);
 #endif
+#ifdef CONFIG_ARCH_SHMOBILE_MULTI
        clocksource_of_init();
+#endif
 }
 
 struct memory_reserve_config {
index 93ebe3430bfe707ac234b35f37deb54a1998a531..fb5e1bb34be80b1d5c529a728e55b7a7d41fa8c8 100644 (file)
@@ -595,6 +595,7 @@ static struct platform_device ipmmu_device = {
 
 static struct renesas_intc_irqpin_config irqpin0_platform_data = {
        .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
+       .control_parent = true,
 };
 
 static struct resource irqpin0_resources[] = {
@@ -656,6 +657,7 @@ static struct platform_device irqpin1_device = {
 
 static struct renesas_intc_irqpin_config irqpin2_platform_data = {
        .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
+       .control_parent = true,
 };
 
 static struct resource irqpin2_resources[] = {
@@ -686,6 +688,7 @@ static struct platform_device irqpin2_device = {
 
 static struct renesas_intc_irqpin_config irqpin3_platform_data = {
        .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
+       .control_parent = true,
 };
 
 static struct resource irqpin3_resources[] = {
index f1d027aa7a81ac3361401380553771bdb37d47d2..0edf2a6d2bbef7f78fcb8f5c0396445dde21618c 100644 (file)
@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void)
        if (!max_freq)
                return;
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       /* Non-multiplatform r8a73a4 SoC cannot use arch timer due
+        * to GIC being initialized from C and arch timer via DT */
+       if (of_machine_is_compatible("renesas,r8a73a4"))
+               has_arch_timer = false;
+
+       /* Non-multiplatform r8a7790 SoC cannot use arch timer due
+        * to GIC being initialized from C and arch timer via DT */
+       if (of_machine_is_compatible("renesas,r8a7790"))
+               has_arch_timer = false;
+#endif
+
        if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
                if (is_a7_a8_a9)
                        shmobile_setup_delay_hz(max_freq, 1, 3);
index 03823e784f63e7acf91fdda4d5f58927ccf527d9..c43c714555661337048b72a5a21a6b5357659567 100644 (file)
@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
 
 config ARM_KERNMEM_PERMS
        bool "Restrict kernel memory permissions"
+       depends on MMU
        help
          If this is set, kernel memory other than kernel text (and rodata)
          will be made non-executable. The tradeoff is that each region is
index 91892569710f5ab79127218e808e84ef14ef33eb..845769e413323120b6d7b4afed7640746413bc98 100644 (file)
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
        for_each_possible_cpu(i) {
-               if (i == cpu) {
-                       asid = 0;
-               } else {
-                       asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-                       /*
-                        * If this CPU has already been through a
-                        * rollover, but hasn't run another task in
-                        * the meantime, we must preserve its reserved
-                        * ASID, as this is the only trace we have of
-                        * the process it is still running.
-                        */
-                       if (asid == 0)
-                               asid = per_cpu(reserved_asids, i);
-                       __set_bit(asid & ~ASID_MASK, asid_map);
-               }
+               asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+               /*
+                * If this CPU has already been through a
+                * rollover, but hasn't run another task in
+                * the meantime, we must preserve its reserved
+                * ASID, as this is the only trace we have of
+                * the process it is still running.
+                */
+               if (asid == 0)
+                       asid = per_cpu(reserved_asids, i);
+               __set_bit(asid & ~ASID_MASK, asid_map);
                per_cpu(reserved_asids, i) = asid;
        }
 
index 7864797609b3849628455782c79949f094f6997e..903dba064a034c7e5d9fff950d3fa334301130d9 100644 (file)
@@ -1940,13 +1940,32 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 
+static int __arm_iommu_attach_device(struct device *dev,
+                                    struct dma_iommu_mapping *mapping)
+{
+       int err;
+
+       err = iommu_attach_device(mapping->domain, dev);
+       if (err)
+               return err;
+
+       kref_get(&mapping->kref);
+       dev->archdata.mapping = mapping;
+
+       pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+       return 0;
+}
+
 /**
  * arm_iommu_attach_device
  * @dev: valid struct device pointer
  * @mapping: io address space mapping structure (returned from
  *     arm_iommu_create_mapping)
  *
- * Attaches specified io address space mapping to the provided device,
+ * Attaches specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version.
+ *
  * More than one client might be attached to the same io address space
  * mapping.
  */
@@ -1955,25 +1974,16 @@ int arm_iommu_attach_device(struct device *dev,
 {
        int err;
 
-       err = iommu_attach_device(mapping->domain, dev);
+       err = __arm_iommu_attach_device(dev, mapping);
        if (err)
                return err;
 
-       kref_get(&mapping->kref);
-       dev->archdata.mapping = mapping;
-
-       pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+       set_dma_ops(dev, &iommu_ops);
        return 0;
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- */
-void arm_iommu_detach_device(struct device *dev)
+static void __arm_iommu_detach_device(struct device *dev)
 {
        struct dma_iommu_mapping *mapping;
 
@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev)
 
        pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+       __arm_iommu_detach_device(dev);
+       set_dma_ops(dev, NULL);
+}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
                return false;
        }
 
-       if (arm_iommu_attach_device(dev, mapping)) {
+       if (__arm_iommu_attach_device(dev, mapping)) {
                pr_warn("Failed to attached device %s to IOMMU_mapping\n",
                                dev_name(dev));
                arm_iommu_release_mapping(mapping);
@@ -2025,7 +2048,10 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 
-       arm_iommu_detach_device(dev);
+       if (!mapping)
+               return;
+
+       __arm_iommu_detach_device(dev);
        arm_iommu_release_mapping(mapping);
 }
 
index 59424937e52b8839c4fb4504aedbc93584b4304e..9fe8e241335c6edcb0db5077f5d4621aefb68944 100644 (file)
@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
        static const char units[] = "KMGTPE";
        u64 prot = val & pg_level[level].mask;
 
-       if (addr < USER_PGTABLES_CEILING)
-               return;
-
        if (!st->level) {
                st->level = level;
                st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
        pgd_t *pgd = swapper_pg_dir;
        struct pg_state st;
        unsigned long addr;
-       unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+       unsigned i;
 
        memset(&st, 0, sizeof(st));
        st.seq = m;
        st.marker = address_markers;
 
-       pgd += pgdoff;
-
-       for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+       for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
                addr = i * PGDIR_SIZE;
                if (!pgd_none(*pgd)) {
                        walk_pud(&st, pgd, addr);
index 98ad9c79ea0e6a1e980a5f0c09e4c25389e060e2..2495c8cb47baaddcdb15a209406a9e4b9b4f1f25 100644 (file)
@@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-               .mask   = ~PMD_SECT_RDONLY,
-               .prot   = PMD_SECT_RDONLY,
+               .mask   = ~L_PMD_SECT_RDONLY,
+               .prot   = L_PMD_SECT_RDONLY,
 #else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
index cda7c40999b6692fef5cb4a4b869b5608d87c5ec..4e6ef896c6195db73f770957e9df619a0be05e06 100644 (file)
@@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
-       unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-       unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+       phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+       phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
index 1c43cec971b5cd7196b367d1917baa25a10078bd..0666888639202f4beeb0d0086d969841a96f1700 100644 (file)
@@ -85,6 +85,7 @@ vdso_install:
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
+       $(Q)$(MAKE) $(clean)=$(boot)/dts
 
 define archhelp
   echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
index 3b8d427c398599c85c8d12e3f67f20095ec43bac..c62b0f4d9ef65bf4dfa8503d581c159e5fe9b16b 100644 (file)
@@ -3,6 +3,4 @@ dts-dirs += apm
 dts-dirs += arm
 dts-dirs += cavium
 
-always         := $(dtb-y)
 subdir-y       := $(dts-dirs)
-clean-files    := *.dtb
index cb3073e4e7a83e555992ea544abb658607512ab5..d429129ecb3d03fe3a7460ecd3ed9d02950cb193 100644 (file)
@@ -22,7 +22,7 @@
        };
 
        chosen {
-               stdout-path = &soc_uart0;
+               stdout-path = "serial0:115200n8";
        };
 
        psci {
index b1fa4e61471814f78e4c5f99ed349afc98541121..fbe0ca31a99cafc0769a6d6b93622016af637ee4 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <asm/barrier.h>
 
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/types.h>
 
index ace70682499b69b3e23e36215bb8760c6ea67cb3..8e797b2fcc0186b6f5f505303b51fbea0eff2e94 100644 (file)
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
        u64             reg_id_aa64pfr0;
        u64             reg_id_aa64pfr1;
 
+       u32             reg_id_dfr0;
        u32             reg_id_isar0;
        u32             reg_id_isar1;
        u32             reg_id_isar2;
@@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
        u32             reg_id_mmfr3;
        u32             reg_id_pfr0;
        u32             reg_id_pfr1;
+
+       u32             reg_mvfr0;
+       u32             reg_mvfr1;
+       u32             reg_mvfr2;
 };
 
 DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
index 8127e45e263752821c833d1c354a8033372b2a47..3cb4c856b10da40a73c88138d97a2586b21eb6ff 100644 (file)
@@ -41,6 +41,18 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+       if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+               vcpu->arch.hcr_el2 &= ~HCR_RW;
+}
+
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+       vcpu->arch.hcr_el2 = hcr;
 }
 
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
index 0b7dfdb931dff6f9610df181015ce7ecc1a7f16a..acd101a9014d374d2c235f5936a2b6a5c1d7c28f 100644 (file)
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
         * Anything that is not used directly from assembly code goes
         * here.
         */
-       /* dcache set/way operation pending */
-       int last_pcpu;
-       cpumask_t require_dcache_flush;
 
        /* Don't run the guest */
        bool pause;
index 14a74f136272b94852d86901ae75c4f329ee2ee3..adcf49547301b1acc598e72722752b36240220be 100644 (file)
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
        return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-                                            unsigned long size,
-                                            bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+                                              unsigned long size,
+                                              bool ipa_uncached)
 {
+       void *va = page_address(pfn_to_page(pfn));
+
        if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-               kvm_flush_dcache_to_poc((void *)hva, size);
+               kvm_flush_dcache_to_poc(va, size);
 
        if (!icache_is_aliasing()) {            /* PIPT */
-               flush_icache_range(hva, hva + size);
+               flush_icache_range((unsigned long)va,
+                                  (unsigned long)va + size);
        } else if (!icache_is_aivivt()) {       /* non ASID-tagged VIVT */
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+       struct page *page = pte_page(pte);
+       kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       struct page *page = pmd_page(pmd);
+       kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+       struct page *page = pud_page(pud);
+       kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
 #define kvm_virt_to_phys(x)            __virt_to_phys((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
index 286b1bec547ce2a060d01cf816893b2b4aef9d12..f9be30ea1cbd8bc5b00cf2627c2e0be47ab54d98 100644 (file)
@@ -31,6 +31,7 @@
 
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
 
@@ -123,9 +124,6 @@ struct task_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk)   do { } while (0)
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()                    barrier()
index 49c9aefd24a50e1892c4df018e784fc44be4617e..23e9432ac11240a15b5dc4fecfe7d275cdfb10db 100644 (file)
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           386
+#define __NR_compat_syscalls           388
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index 8893cebcea5b8d903fed25db1a6dcb6effa14cef..27224426e0bf920de713c22cd4b6ca125c84d9bf 100644 (file)
@@ -795,3 +795,5 @@ __SYSCALL(__NR_getrandom, sys_getrandom)
 __SYSCALL(__NR_memfd_create, sys_memfd_create)
 #define __NR_bpf 386
 __SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 387
+__SYSCALL(__NR_execveat, compat_sys_execveat)
index 57b641747534a4bb7a8e0901b685d092b89fbfdc..07d435cf2eea6ee4da81e158b4e26a6be5b14c29 100644 (file)
@@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
         * If we have AArch32, we care about 32-bit features for compat. These
         * registers should be RES0 otherwise.
         */
+       diff |= CHECK(id_dfr0, boot, cur, cpu);
        diff |= CHECK(id_isar0, boot, cur, cpu);
        diff |= CHECK(id_isar1, boot, cur, cpu);
        diff |= CHECK(id_isar2, boot, cur, cpu);
@@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
        diff |= CHECK(id_pfr0, boot, cur, cpu);
        diff |= CHECK(id_pfr1, boot, cur, cpu);
 
+       diff |= CHECK(mvfr0, boot, cur, cpu);
+       diff |= CHECK(mvfr1, boot, cur, cpu);
+       diff |= CHECK(mvfr2, boot, cur, cpu);
+
        /*
         * Mismatched CPU features are a recipe for disaster. Don't even
         * pretend to support them.
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 
+       info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
        info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
        info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
        info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
        info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
 
+       info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+       info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+       info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+
        cpuinfo_detect_icache_policy(info);
 
        check_local_cpu_errata();
index 6fac253bc783a44066630643c0b57153825d0be9..2bb4347d0edfd9d703fb96768fcdf6a58ea9aad0 100644 (file)
@@ -326,6 +326,7 @@ void __init efi_idmap_init(void)
 
        /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
        efi_setup_idmap();
+       early_memunmap(memmap.map, memmap.map_end - memmap.map);
 }
 
 static int __init remap_region(efi_memory_desc_t *md, void **new)
@@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void)
        }
 
        mapsize = memmap.map_end - memmap.map;
-       early_memunmap(memmap.map, mapsize);
 
        if (efi_runtime_disabled()) {
                pr_info("EFI runtime services will be disabled.\n");
index fd027b101de59fd350ed45d2ec928693e4b9be41..9b6f71db270952ad72cbfb30c767fcf2320092f9 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
+#include <asm/alternative.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
 
index 6762ad705587fa34fff0281546273a6930ddbcbf..3f62b35fb6f157c49c1adb8b4cc3ec2744cc1e48 100644 (file)
@@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
        else
                return PERF_SAMPLE_REGS_ABI_64;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
index b8099116675459b933b290fd3fda5fd3cd3ecdb3..20fe2932ad0c47d50d0c836acd35686c8777b98a 100644 (file)
@@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
        request_standard_resources();
 
        efi_idmap_init();
+       early_ioremap_reset();
 
        unflatten_device_tree();
 
index 4f93c67e63de34293dadc0baeb23fe944b27888c..14944e5b28dace9ea083e74f4849d4f40eadef95 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
 #include <asm/cputype.h>
+#include <asm/io.h>
 #include <asm/smp_plat.h>
 
 extern void secondary_holding_pen(void);
index fbe909fb0a1a8b95ab4f6e3ade19daaa21c70436..c3ca89c27c6b351839ec62f763ca99d34ac5b3c2 100644 (file)
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
+       lsr     x1, x1, #12
        tlbi    ipas2e1is, x1
        /*
         * We have to ensure completion of the invalidation at Stage-2,
index 70a7816535cd4a9bf575b9767a9a9fd62dbe21e6..0b43265789858cbe71f761eebbc48927834b7fe8 100644 (file)
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                        if (!cpu_has_32bit_el1())
                                return -EINVAL;
                        cpu_reset = &default_regs_reset32;
-                       vcpu->arch.hcr_el2 &= ~HCR_RW;
                } else {
                        cpu_reset = &default_regs_reset;
                }
index 3d7c2df89946cc1d1606a4b3401115f10e44ab71..f31e8bb2bc5bd0c7aec76e654fce52f5723e527b 100644 (file)
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
        return ccsidr;
 }
 
-static void do_dc_cisw(u32 val)
-{
-       asm volatile("dc cisw, %x0" : : "r" (val));
-       dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
-       asm volatile("dc csw, %x0" : : "r" (val));
-       dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
 {
-       unsigned long val;
-       int cpu;
-
        if (!p->is_write)
                return read_from_write_only(vcpu, p);
 
-       cpu = get_cpu();
-
-       cpumask_setall(&vcpu->arch.require_dcache_flush);
-       cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-       /* If we were already preempted, take the long way around */
-       if (cpu != vcpu->arch.last_pcpu) {
-               flush_cache_all();
-               goto done;
-       }
-
-       val = *vcpu_reg(vcpu, p->Rt);
-
-       switch (p->CRm) {
-       case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-       case 14:                /* DCCISW */
-               do_dc_cisw(val);
-               break;
-
-       case 10:                /* DCCSW */
-               do_dc_csw(val);
-               break;
-       }
-
-done:
-       put_cpu();
-
+       kvm_set_way_flush(vcpu);
        return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          const struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
 {
        unsigned long val;
+       bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
        BUG_ON(!p->is_write);
 
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
                vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
        }
 
-       return true;
-}
-
-/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- */
-static bool access_sctlr(struct kvm_vcpu *vcpu,
-                        const struct sys_reg_params *p,
-                        const struct sys_reg_desc *r)
-{
-       access_vm_reg(vcpu, p, r);
-
-       if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
-               vcpu->arch.hcr_el2 &= ~HCR_TVM;
-               stage2_flush_vm(vcpu->kvm);
-       }
-
+       kvm_toggle_cache(vcpu, was_enabled);
        return true;
 }
 
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
          NULL, reset_mpidr, MPIDR_EL1 },
        /* SCTLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
-         access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+         access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
        /* CPACR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
          NULL, reset_val, CPACR_EL1, 0 },
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
-       { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+       { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
index cf33f33333ccd230720207a071399863bd4eb0de..d54dc9ac4b70874af52e4c94054c850b576c9f0d 100644 (file)
@@ -15,6 +15,7 @@
  */
 #include <linux/debugfs.h>
 #include <linux/fs.h>
+#include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
index bac492c12fcc4bd054e09f8db8022b64dcaa8f74..c95464a33f36175d1f7905ad61bd5176654efd0f 100644 (file)
@@ -335,14 +335,8 @@ static int keep_initrd;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-       if (!keep_initrd) {
-               if (start == initrd_start)
-                       start = round_down(start, PAGE_SIZE);
-               if (end == initrd_end)
-                       end = round_up(end, PAGE_SIZE);
-
+       if (!keep_initrd)
                free_reserved_area((void *)start, (void *)end, 0, "initrd");
-       }
 }
 
 static int __init keepinitrd_setup(char *__unused)
index 2c9412908024d4ce88d8945cf953b0d24437dcaf..164efa009e5be776a52ae16b024d71c8e288ab5c 100644 (file)
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
 
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
 {
        vfree(mod->arch.syminfo);
        mod->arch.syminfo = NULL;
-
-       vfree(module_region);
 }
 
 static inline int check_rela(Elf32_Rela *rela, struct module *module,
@@ -291,12 +289,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 
        return ret;
 }
-
-int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-                   struct module *module)
-{
-       vfree(module->arch.syminfo);
-       module->arch.syminfo = NULL;
-
-       return 0;
-}
index 0eca93327195077ec16bdfd99efd7294c6ab2de6..d223a8b57c1eaad282289e75089654153ab598d6 100644 (file)
@@ -142,6 +142,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 6f4bac969bf72e360a6476c55a9e7f667d2d21fb..23eada79439c7abe2734ffdb0945ec66318dff9f 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
index 08a313fc22418c326987dc36c9c9203de639e270..f772068d9e797e0c236e512ac80254063de07802 100644 (file)
@@ -604,7 +604,7 @@ static ssize_t __sync_serial_read(struct file *file,
                                  struct timespec *ts)
 {
        unsigned long flags;
-       int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+       int dev = MINOR(file_inode(file)->i_rdev);
        int avail;
        struct sync_port *port;
        unsigned char *start;
index 51123f985eb5862a83bf7afcfc22e4bce2ecbda7..af04cb6b6dc9a3777930bd6974a401988cccafcc 100644 (file)
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
 }
 
 /* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
 {
        kfree(module_region);
 }
index 1790f22e71a21a859b2b7b1942cbbc503c2d557e..2686a7aa8ec82c50f29592840185b519522c53a7 100644 (file)
@@ -176,6 +176,8 @@ retry:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 67b1d16857593ddc083bf45cc8fd9e19d5430994..0635bd6c2af392fc372b0e02002b1a669a88e33c 100644 (file)
@@ -94,7 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
                                r = &dev->resource[idx];
                                if (!r->start)
                                        continue;
-                               pci_claim_resource(dev, idx);
+                               pci_claim_bridge_resource(dev, idx);
                        }
                }
                pcibios_allocate_bus_resources(&bus->children);
index 9a66372fc7c76019ca874a9c3780c2fc8392266c..ec4917ddf67872aa46b60c6b067b0a67ec5417a4 100644 (file)
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index f3b51b57740af91e097a7b4b37b4067b5873cd44..95c39b95e97e24f1ed3d7a58cf56dbbefc2ff419 100644 (file)
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    318 /* length of syscall table */
+#define NR_syscalls                    319 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 4c2240c1b0cb4b81e219631750c4e61f82ffb750..461079560c78728848b7631de5efbe700d146620 100644 (file)
 #define __NR_getrandom                 1339
 #define __NR_memfd_create              1340
 #define __NR_bpf                       1341
+#define __NR_execveat                  1342
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index 615ef81def494ee804deb7252e343dee3cbd7e56..e795cb848154a451bf12c83ce221123686b238b5 100644 (file)
@@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
 {
        return _acpi_map_lsapic(handle, physid, pcpu);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
 
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
 {
        ia64_cpu_to_sapicid[cpu] = -1;
        set_cpu_present(cpu, false);
@@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
 
        return (0);
 }
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
 #endif                         /* CONFIG_ACPI_HOTPLUG_CPU */
 
 #ifdef CONFIG_ACPI_NUMA
index f5e96dffc63c3d0ce54399759ceb9a5fefacde15..fcf8b8cbca0be79607808c81aad2b37456bbef89 100644 (file)
@@ -1779,6 +1779,7 @@ sys_call_table:
        data8 sys_getrandom
        data8 sys_memfd_create                  // 1340
        data8 sys_bpf
+       data8 sys_execveat
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
index 24603be24c14acbbcb29874b45fb55518fed3cc7..29754aae5177a94ec257021ab00c57f688a61de8 100644 (file)
@@ -305,14 +305,12 @@ plt_target (struct plt_entry *plt)
 #endif /* !USE_BRL */
 
 void
-module_free (struct module *mod, void *module_region)
+module_arch_freeing_init (struct module *mod)
 {
-       if (mod && mod->arch.init_unw_table &&
-           module_region == mod->module_init) {
+       if (mod->arch.init_unw_table) {
                unw_remove_unwind_table(mod->arch.init_unw_table);
                mod->arch.init_unw_table = NULL;
        }
-       vfree(module_region);
 }
 
 /* Have we already seen one of these relocations? */
index 7225dad87094d81e89459e5a61909fa5b2d10ca0..ba5ba7accd0d6bb4dbab34f7fc307c4306347f4a 100644 (file)
@@ -172,6 +172,8 @@ retry:
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
+               } else if (fault & VM_FAULT_SIGSEGV) {
+                       goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
index 291a582777cf3dd0320f4757ea1f9c1fc39dd17c..900cc93e540961903816fd8d69769a3311fdbbf3 100644 (file)
@@ -487,45 +487,39 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
        return 0;
 }
 
-static int is_valid_resource(struct pci_dev *dev, int idx)
+void pcibios_fixup_device_resources(struct pci_dev *dev)
 {
-       unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
-       struct resource *devr = &dev->resource[idx], *busr;
+       int idx;
 
        if (!dev->bus)
-               return 0;
-
-       pci_bus_for_each_resource(dev->bus, busr, i) {
-               if (!busr || ((busr->flags ^ devr->flags) & type_mask))
-                       continue;
-               if ((devr->start) && (devr->start >= busr->start) &&
-                               (devr->end <= busr->end))
-                       return 1;
-       }
-       return 0;
-}
+               return;
 
-static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
-{
-       int i;
+       for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+               struct resource *r = &dev->resource[idx];
 
-       for (i = start; i < limit; i++) {
-               if (!dev->resource[i].flags)
+               if (!r->flags || r->parent || !r->start)
                        continue;
-               if ((is_valid_resource(dev, i)))
-                       pci_claim_resource(dev, i);
-       }
-}
 
-void pcibios_fixup_device_resources(struct pci_dev *dev)
-{
-       pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
+               pci_claim_resource(dev, idx);
+       }
 }
 EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
 
 static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
 {
-       pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
+       int idx;
+
+       if (!dev->bus)
+               return;
+
+       for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+               struct resource *r = &dev->resource[idx];
+
+               if (!r->flags || r->parent || !r->start)
+                       continue;
+
+               pci_claim_bridge_resource(dev, idx);
+       }
 }
 
 /*
index e9c6a8014bd647eec50a66afb5bc75b076b35e4d..e3d4d4890104cc27e2eb9de2f22cb6f53f939c90 100644 (file)
@@ -200,6 +200,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 75e75d7b1702fb6434c59e9155dbba1d71623b17..244e0dbe45dbeda359e233cde23b4652f0ce13dc 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            355
+#define NR_syscalls            356
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 2c1bec9a14b67da42a8ed09b644373d0cf35b5ef..61fb6cb9d2ae3c66a1c0c6dec1ac95adb83dd810 100644 (file)
 #define __NR_getrandom         352
 #define __NR_memfd_create      353
 #define __NR_bpf               354
+#define __NR_execveat          355
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index 2ca219e184cd16e6ebad1bd9123061695691c843..a0ec4303f2c8e57a04fb353178d43b0be6a461fe 100644 (file)
@@ -375,4 +375,5 @@ ENTRY(sys_call_table)
        .long sys_getrandom
        .long sys_memfd_create
        .long sys_bpf
+       .long sys_execveat              /* 355 */
 
index 2bd7487440c455802dac6470ec05ac138148bfcb..b2f04aee46ecc2f7a5fb1db26d8e4279f6b6ea2e 100644 (file)
@@ -145,6 +145,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto map_err;
                else if (fault & VM_FAULT_SIGBUS)
                        goto bus_err;
                BUG();
index 332680e5ebf23c7909b796c415c2273efd77ba3c..2de5dc695a87fa96d41a83e127166a7126d10df0 100644 (file)
@@ -141,6 +141,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index fa4cf52aa7a6d386711690005a314ece7d67fc53..d46a5ebb7570e07869ea03b9b995374aa3bff82e 100644 (file)
@@ -224,6 +224,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index b30e41c0c0335cf2ab79e716c9c41c0ebced18e8..48528fb81effa07ef5c992c08efba2cad6a75ff0 100644 (file)
@@ -1026,6 +1026,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                         pr, (pr && pr->name) ? pr->name : "nil");
 
                if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+                       struct pci_dev *dev = bus->self;
+
                        if (request_resource(pr, res) == 0)
                                continue;
                        /*
@@ -1035,6 +1037,12 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                         */
                        if (reparent_resources(pr, res) == 0)
                                continue;
+
+                       if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+                           pci_claim_bridge_resource(dev,
+                                                i + PCI_BRIDGE_RESOURCES) == 0)
+                               continue;
+
                }
                pr_warn("PCI: Cannot allocate resource region ");
                pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1227,7 +1235,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
                                 (unsigned long long)r->end,
                                 (unsigned int)r->flags);
 
-                       pci_claim_resource(dev, i);
+                       if (pci_claim_resource(dev, i) == 0)
+                               continue;
+
+                       pci_claim_bridge_resource(dev, i);
                }
        }
 
index becc42bb18495adf98389bd039bc111c1893cedd..70ab5d664332694e92305331f13ed15a35ab1956 100644 (file)
@@ -158,6 +158,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 9fd6834a2172ac3cd77115d604a2f07ac8370bc2..5d6139390bf830adf503d67d004a5322d8eb7ad4 100644 (file)
@@ -1388,7 +1388,7 @@ out:
 void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
-               module_free(NULL, fp->bpf_func);
+               module_memfree(fp->bpf_func);
 
        bpf_prog_unlock_free(fp);
 }
index 3516cbdf1ee93acb82ebef6428f79df9af104514..0c2cc5d39c8e37ce1cfe5be191902bc435c41090 100644 (file)
@@ -262,6 +262,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index febb9cd83177177e4c0edffa6d4c2d34b4a2b2a0..b5b036f64275b0fe0176132b74f4715f185f7503 100644 (file)
@@ -106,7 +106,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
                                if (!r->flags)
                                        continue;
                                if (!r->start ||
-                                   pci_claim_resource(dev, idx) < 0) {
+                                   pci_claim_bridge_resource(dev, idx) < 0) {
                                        printk(KERN_ERR "PCI:"
                                               " Cannot allocate resource"
                                               " region %d of bridge %s\n",
index 6b4339f8c9c2e1e757f13fddccb9053c2c292b0c..471ff398090cd88e89089b5387d1a1f4dc0dcc0a 100644 (file)
@@ -281,42 +281,37 @@ static int __init pci_check_direct(void)
        return -ENODEV;
 }
 
-static int is_valid_resource(struct pci_dev *dev, int idx)
+static void pcibios_fixup_device_resources(struct pci_dev *dev)
 {
-       unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
-       struct resource *devr = &dev->resource[idx], *busr;
-
-       if (dev->bus) {
-               pci_bus_for_each_resource(dev->bus, busr, i) {
-                       if (!busr || (busr->flags ^ devr->flags) & type_mask)
-                               continue;
-
-                       if (devr->start &&
-                           devr->start >= busr->start &&
-                           devr->end <= busr->end)
-                               return 1;
-               }
-       }
+       int idx;
 
-       return 0;
+       if (!dev->bus)
+               return;
+
+       for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+               struct resource *r = &dev->resource[idx];
+
+               if (!r->flags || r->parent || !r->start)
+                       continue;
+
+               pci_claim_resource(dev, idx);
+       }
 }
 
-static void pcibios_fixup_device_resources(struct pci_dev *dev)
+static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
 {
-       int limit, i;
+       int idx;
 
-       if (dev->bus->number != 0)
+       if (!dev->bus)
                return;
 
-       limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ?
-               PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;
+       for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+               struct resource *r = &dev->resource[idx];
 
-       for (i = 0; i < limit; i++) {
-               if (!dev->resource[i].flags)
+               if (!r->flags || r->parent || !r->start)
                        continue;
 
-               if (is_valid_resource(dev, i))
-                       pci_claim_resource(dev, i);
+               pci_claim_bridge_resource(dev, idx);
        }
 }
 
@@ -330,7 +325,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 
        if (bus->self) {
                pci_read_bridge_bases(bus);
-               pcibios_fixup_device_resources(bus->self);
+               pcibios_fixup_bridge_resources(bus->self);
        }
 
        list_for_each_entry(dev, &bus->devices, bus_list)
index 51d5bb90d3e504e6b3480bf4cb51be0598d58a22..a223691dff4fb1699c52b3e025ebb6446394fbb0 100644 (file)
@@ -72,6 +72,7 @@ void __init setup_cpuinfo(void)
        cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
        cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
        cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
+       cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
 
        if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
                err_cpu("DIV");
index 83bca17d1008f844857ca1dca98b00a4158f6e38..0bdfd13ff98bbbbd5af7fc4251abf1c618d63a94 100644 (file)
@@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt)
        GET_THREAD_INFO r1
        ldw     r4, TI_PREEMPT_COUNT(r1)
        bne     r4, r0, restore_all
-
-need_resched:
        ldw     r4, TI_FLAGS(r1)                /* ? Need resched set */
        BTBZ    r10, r4, TIF_NEED_RESCHED, restore_all
        ldw     r4, PT_ESTATUS(sp)      /* ? Interrupts off */
        andi    r10, r4, ESTATUS_EPIE
        beq     r10, r0, restore_all
-       movia   r4, PREEMPT_ACTIVE
-       stw     r4, TI_PREEMPT_COUNT(r1)
-       rdctl   r10, status             /* enable intrs again */
-       ori     r10, r10 ,STATUS_PIE
-       wrctl   status, r10
-       PUSH    r1
-       call    schedule
-       POP     r1
-       mov     r4, r0
-       stw     r4, TI_PREEMPT_COUNT(r1)
-       rdctl   r10, status             /* disable intrs */
-       andi    r10, r10, %lo(~STATUS_PIE)
-       wrctl   status, r10
-       br      need_resched
-#else
-       br      restore_all
+       call    preempt_schedule_irq
 #endif
+       br      restore_all
 
 /***********************************************************************
  * A few syscall wrappers
index cc924a38f22a0fef473a3c6d871789fdacf8f7f9..e2e3f13f98d55a811ef30db3493dbd71a8e86eb0 100644 (file)
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
 }
 
 /* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
 {
        kfree(module_region);
 }
index f9d27883a7148729545489c14bb678122da74e88..2d0ea25be1717de06d8cd138032dc5c7c5f3970d 100644 (file)
@@ -200,7 +200,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 
        /* Set up to return from userspace; jump to fixed address sigreturn
           trampoline on kuser page.  */
-       regs->ra = (unsigned long) (0x1040);
+       regs->ra = (unsigned long) (0x1044);
 
        /* Set up registers for signal handler */
        regs->sp = (unsigned long) frame;
index 15a0bb5fc06d970a6c3dcd92ce31173a8914670b..34429d5a0ccde65e0bd18a88dbc47a2968d6c793 100644 (file)
@@ -135,6 +135,8 @@ survive:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 0703acf7d3276811919fd3d398ada99b1b9c6d50..230ac20ae7944f71636e5083fdaf3f034eb10af2 100644 (file)
@@ -171,6 +171,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 50dfafc3f2c103aa3567f2fb88aeb94865847517..5822e8e200e6be1ab110b15baf94d4581624099c 100644 (file)
@@ -298,14 +298,10 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
 }
 #endif
 
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
 {
        kfree(mod->arch.section);
        mod->arch.section = NULL;
-
-       vfree(module_region);
 }
 
 /* Additional bytes needed in front of individual sections */
index 3ca9c1131cfe0d80b9b12fb5c0e599a3363942c0..e5120e653240c4fa52d4895c7d1d206d3d12e68c 100644 (file)
@@ -256,6 +256,8 @@ good_area:
                 */
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto bad_area;
                BUG();
index d3feba5a275f6465fa46b850ed57cb688bd834ea..c154cebc1041c4ac24d72c5e51b93bfec2f0d9ee 100644 (file)
@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
 
+MODULE_ALIAS_CRYPTO("sha1");
 MODULE_ALIAS_CRYPTO("sha1-powerpc");
index 19c36cba37c4acac5e63e7c4d8a093b4dbe65781..a46f5f45570c8904a5a13de12ecb3edfed5c2449 100644 (file)
@@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
 extern void reserve_crashkernel(void);
 extern void machine_kexec_mask_interrupts(void);
 
+static inline bool kdump_in_progress(void)
+{
+       return crashing_cpu >= 0;
+}
+
 #else /* !CONFIG_KEXEC */
 static inline void crash_kexec_secondary(struct pt_regs *regs) { }
 
@@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
        return 0;
 }
 
+static inline bool kdump_in_progress(void)
+{
+       return false;
+}
+
 #endif /* CONFIG_KEXEC */
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
index ce9577d693be1c7849c18ac0259a35525d46c6fe..91062eef582f9c1ed8d824f9e16bcde8a0f8714c 100644 (file)
@@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
 SYSCALL_SPU(getrandom)
 SYSCALL_SPU(memfd_create)
 SYSCALL_SPU(bpf)
+COMPAT_SYS(execveat)
index ebc4f165690a9fc0113864b6857708b5df8c317a..0be6c681cab1341061c02031464d5355ff8a4d7d 100644 (file)
@@ -23,9 +23,9 @@
 #define THREAD_SIZE            (1 << THREAD_SHIFT)
 
 #ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp)  clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
 #else
-#define CURRENT_THREAD_INFO(dest, sp)  rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
 #define THREAD_SIZE_ORDER      (THREAD_SHIFT - PAGE_SHIFT)
 
 /* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
 static inline struct thread_info *current_thread_info(void)
 {
-       /* gcc4, at least, is smart enough to turn this into a single
-        * rlwinm for ppc32 and clrrdi for ppc64 */
-       return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+       unsigned long val;
+
+       asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+       return (struct thread_info *)val;
 }
 
 #endif /* __ASSEMBLY__ */
index e0da021caa004205fc8b645d95b3047c8cd9b73e..36b79c31eedda5cb090e73cafc6a64c937fcc5f7 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          362
+#define __NR_syscalls          363
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index f55351f2e66e962097bc078c25f77a176ca52e2a..ef5b5b1f31231648135ed092af027933c8dc3f06 100644 (file)
 #define __NR_getrandom         359
 #define __NR_memfd_create      360
 #define __NR_bpf               361
+#define __NR_execveat          362
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 879b3aacac3282d8b9de5101c9349d2aea9b8edc..f96d1ec241891b9683761d3e04c2daed29d670d1 100644 (file)
@@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
         * using debugger IPI.
         */
 
-       if (crashing_cpu == -1)
+       if (!kdump_in_progress())
                kexec_prepare_cpus();
 
        pr_debug("kexec: Starting switchover sequence.\n");
index 37d512d35943400737d36be40448c063f9ee76f0..2a525c938158e7937445837ef6448354b5147758 100644 (file)
@@ -1184,6 +1184,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                         pr, (pr && pr->name) ? pr->name : "nil");
 
                if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+                       struct pci_dev *dev = bus->self;
+
                        if (request_resource(pr, res) == 0)
                                continue;
                        /*
@@ -1193,6 +1195,11 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                         */
                        if (reparent_resources(pr, res) == 0)
                                continue;
+
+                       if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+                           pci_claim_bridge_resource(dev,
+                                               i + PCI_BRIDGE_RESOURCES) == 0)
+                               continue;
                }
                pr_warning("PCI: Cannot allocate resource region "
                           "%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1401,7 +1408,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
                                 (unsigned long long)r->end,
                                 (unsigned int)r->flags);
 
-                       pci_claim_resource(dev, i);
+                       if (pci_claim_resource(dev, i) == 0)
+                               continue;
+
+                       pci_claim_bridge_resource(dev, i);
                }
        }
 
index 8ec017cb44461943c90ebdb6cdf6e007936efb39..8b2d2dc8ef106ef780c9a145335e9de17b3879a7 100644 (file)
@@ -700,6 +700,7 @@ void start_secondary(void *unused)
        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
+       cpu_callin_map[cpu] = 1;
 
        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
@@ -738,14 +739,6 @@ void start_secondary(void *unused)
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
 
-       /*
-        * CPU must be marked active and online before we signal back to the
-        * master, because the scheduler needs to see the cpu_online and
-        * cpu_active bits set.
-        */
-       smp_wmb();
-       cpu_callin_map[cpu] = 1;
-
        local_irq_enable();
 
        cpu_startup_entry(CPUHP_ONLINE);
index 5a236f082c78386a47b9b415f98f619e8e615688..1b5305d4bdabe95c4f4430b89c0fb56512bb6fdf 100644 (file)
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
                if (*flt & VM_FAULT_OOM) {
                        ret = -ENOMEM;
                        goto out_unlock;
-               } else if (*flt & VM_FAULT_SIGBUS) {
+               } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
                        ret = -EFAULT;
                        goto out_unlock;
                }
index eb79907f34fac2df170be8fcb3a14a9cbf400b1e..6154b0a2b06331f0c29efe56b210baa6f90d43c7 100644 (file)
@@ -437,6 +437,8 @@ good_area:
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+               if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                rc = mm_fault_error(regs, address, fault);
                if (rc >= MM_FAULT_RETURN)
                        goto bail;
index 1ca125b9c226070eefca744857b203648055b131..d1916b577f2c9a71c3fb3a5ee419925f070412d0 100644 (file)
@@ -699,7 +699,7 @@ out:
 void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
-               module_free(NULL, fp->bpf_func);
+               module_memfree(fp->bpf_func);
 
        bpf_prog_unlock_free(fp);
 }
index 54eca8b3b288f8fcca45ef5f44c5211832b3416f..0509bca5e830b656c8a553c047740b68b172f4b1 100644 (file)
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION;                                            \
        b       1f;                                             \
 END_FTR_SECTION(0, 1);                                         \
        ld      r12,opal_tracepoint_refcount@toc(r2);           \
-       std     r12,32(r1);                                     \
        cmpdi   r12,0;                                          \
        bne-    LABEL;                                          \
 1:
index b700a329c31d448444d0f48e0fdd0b9176f1a825..d2de7d5d7574ca48fb1f31aa5c6892a510107ade 100644 (file)
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
         * all cpus at boot. Get these reg values of current cpu and use the
         * same accross all cpus.
         */
-       uint64_t lpcr_val = mfspr(SPRN_LPCR);
+       uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
        uint64_t hid0_val = mfspr(SPRN_HID0);
        uint64_t hid1_val = mfspr(SPRN_HID1);
        uint64_t hid4_val = mfspr(SPRN_HID4);
index 469751d9200469c5220be3da3ec573207d9c1100..b5682fd6c9846b2cdb259720e35a2326cf7c45a1 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/trace.h>
 #include <asm/firmware.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
 #include <asm/fadump.h>
 
 #include "pseries.h"
@@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
                 * out to the user, but at least this will stop us from
                 * continuing on further and creating an even more
                 * difficult to debug situation.
+                *
+                * There is a known problem when kdump'ing, if cpus are offline
+                * the above call will fail. Rather than panicking again, keep
+                * going and hope the kdump kernel is also little endian, which
+                * it usually is.
                 */
-               if (rc)
+               if (rc && !kdump_in_progress())
                        panic("Could not enable big endian exceptions");
        }
 #endif
index 5b150f0c5df94a39587ea6b519e12192d28bce3c..13c6e200b24ec5bc2a7927308eaf579f1a904cd5 100644 (file)
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
        args.token = rtas_token("set-indicator");
        if (args.token == RTAS_UNKNOWN_SERVICE)
                return;
+       args.token = cpu_to_be32(args.token);
        args.nargs = cpu_to_be32(3);
        args.nret = cpu_to_be32(1);
        args.rets = &args.args[3];
index 32040ace00ea2431a18428dca5c34c0c4ebde10c..afbe07907c10b6304e52b5eb234d33694fd9693a 100644 (file)
@@ -231,7 +231,7 @@ failed:
 struct dbfs_d2fc_hdr {
        u64     len;            /* Length of d2fc buffer without header */
        u16     version;        /* Version of header */
-       char    tod_ext[16];    /* TOD clock for d2fc */
+       char    tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
        u64     count;          /* Number of VM guests in d2fc buffer */
        char    reserved[30];
 } __attribute__ ((packed));
index 37b9091ab8c010c88a22bb2851f0133b10c6f032..16aa0c779e0762e210fe6652a0a5efda24771381 100644 (file)
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-       return __arch_local_irq_stosm(0x00);
+       return __arch_local_irq_stnsm(0xff);
 }
 
 static inline notrace unsigned long arch_local_irq_save(void)
index 8beee1cceba4ed17831736a11c6f5167155335d6..98eb2a5792234d9d12c303bdb1301f869f706b60 100644 (file)
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
        set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-#define CLOCK_TICK_RATE        1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE                1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE   16      /* stcke writes 16 bytes */
 
 typedef unsigned long long cycles_t;
 
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
 {
-       typedef struct { char _[sizeof(clk)]; } addrtype;
+       typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
 
        asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
 }
 
 static inline unsigned long long get_tod_clock(void)
 {
-       unsigned char clk[16];
+       unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
        get_tod_clock_ext(clk);
        return *((unsigned long long *)&clk[1]);
 }
index 2b446cf0cc65543d38defaf1985d4246f767449b..67878af257a083c531140f74b951019e2a1537e0 100644 (file)
 #define __NR_bpf               351
 #define __NR_s390_pci_mmio_write       352
 #define __NR_s390_pci_mmio_read                353
-#define NR_syscalls 354
+#define __NR_execveat          354
+#define NR_syscalls 355
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index b89b59158b9592317479422b14cfb8e7779fa722..409d152585bea67a6aca845bb0c3e4db130b1505 100644 (file)
@@ -55,14 +55,10 @@ void *module_alloc(unsigned long size)
 }
 #endif
 
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
 {
-       if (mod) {
-               vfree(mod->arch.syminfo);
-               mod->arch.syminfo = NULL;
-       }
-       vfree(module_region);
+       vfree(mod->arch.syminfo);
+       mod->arch.syminfo = NULL;
 }
 
 static void check_rela(Elf_Rela *rela, struct module *me)
index a2987243bc76c89bd07a6d4a298825bea9664ab8..939ec474b1dd705e7814f94c94df4faeb8009aa2 100644 (file)
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
 SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
index f6b3cd056ec22c1c28b908c4cefc46bc4a4e2099..cc7328080b609a653b72c1ca2c6989f054e3be74 100644 (file)
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
        return false;
 }
 
+static int check_per_event(unsigned short cause, unsigned long control,
+                          struct pt_regs *regs)
+{
+       if (!(regs->psw.mask & PSW_MASK_PER))
+               return 0;
+       /* user space single step */
+       if (control == 0)
+               return 1;
+       /* over indication for storage alteration */
+       if ((control & 0x20200000) && (cause & 0x2000))
+               return 1;
+       if (cause & 0x8000) {
+               /* all branches */
+               if ((control & 0x80800000) == 0x80000000)
+                       return 1;
+               /* branch into selected range */
+               if (((control & 0x80800000) == 0x80800000) &&
+                   regs->psw.addr >= current->thread.per_user.start &&
+                   regs->psw.addr <= current->thread.per_user.end)
+                       return 1;
+       }
+       return 0;
+}
+
 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
        int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
                if (regs->psw.addr - utask->xol_vaddr == ilen)
                        regs->psw.addr = utask->vaddr + ilen;
        }
-       /* If per tracing was active generate trap */
-       if (regs->psw.mask & PSW_MASK_PER)
-               do_per_trap(regs);
+       if (check_per_event(current->thread.per_event.cause,
+                           current->thread.per_user.control, regs)) {
+               /* fix per address */
+               current->thread.per_event.address = utask->vaddr;
+               /* trigger per event */
+               set_pt_regs_flag(regs, PIF_PER_TRAP);
+       }
        return 0;
 }
 
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
        clear_thread_flag(TIF_UPROBE_SINGLESTEP);
        regs->int_code = auprobe->saved_int_code;
        regs->psw.addr = current->utask->vaddr;
+       current->thread.per_event.address = current->utask->vaddr;
 }
 
 unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
        __rc;                                           \
 })
 
-#define emu_store_ril(ptr, input)                      \
+#define emu_store_ril(regs, ptr, input)                        \
 ({                                                     \
        unsigned int mask = sizeof(*(ptr)) - 1;         \
+       __typeof__(ptr) __ptr = (ptr);                  \
        int __rc = 0;                                   \
                                                        \
        if (!test_facility(34))                         \
                __rc = EMU_ILLEGAL_OP;                  \
-       else if ((u64 __force)ptr & mask)               \
+       else if ((u64 __force)__ptr & mask)             \
                __rc = EMU_SPECIFICATION;               \
-       else if (put_user(*(input), ptr))               \
+       else if (put_user(*(input), __ptr))             \
                __rc = EMU_ADDRESSING;                  \
+       if (__rc == 0)                                  \
+               sim_stor_event(regs, __ptr, mask + 1);  \
        __rc;                                           \
 })
 
@@ -197,6 +229,25 @@ union split_register {
        s16 s16[4];
 };
 
+/*
+ * If user per registers are setup to trace storage alterations and an
+ * emulated store took place on a fitting address a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+       if (!(regs->psw.mask & PSW_MASK_PER))
+               return;
+       if (!(current->thread.per_user.control & PER_EVENT_STORE))
+               return;
+       if ((void *)current->thread.per_user.start > (addr + len))
+               return;
+       if ((void *)current->thread.per_user.end < addr)
+               return;
+       current->thread.per_event.address = regs->psw.addr;
+       current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+       set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
+
 /*
  * pc relative instructions are emulated, since parameters may not be
  * accessible from the xol area due to range limitations.
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* sthrl */
-                       rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+                       rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
                        break;
                case 0x0b: /* stgrl */
-                       rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+                       rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* strl */
-                       rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+                       rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
index 7f0089d9a4aa47ef86e7691502281c140ee637c3..e34122e539a16bad4e5727f881f982e4829cd8f0 100644 (file)
@@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, system;
 
-       WARN_ON_ONCE(!irqs_disabled());
-
        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
index 811937bb90be69a18f57621d1be7e6fbfc12d423..9065d5aa3932dd7f6637069e493f2ad4a3ad72f3 100644 (file)
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
                                do_no_context(regs);
                        else
                                pagefault_out_of_memory();
+               } else if (fault & VM_FAULT_SIGSEGV) {
+                       /* Kernel mode? Handle exceptions or die */
+                       if (!user_mode(regs))
+                               do_no_context(regs);
+                       else
+                               do_sigsegv(regs, SEGV_MAPERR);
                } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
index be99357d238c68e34dee133c52053e23c88a34d4..3cf8cc03fff60d7a59e7b23f4e92d477061a5f42 100644 (file)
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
 {
        struct page *page;
-       unsigned long offset;
+       unsigned long offset, mask;
 
        offset = (unsigned long) entry / sizeof(unsigned long);
        offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-       page = pmd_to_page((pmd_t *) entry);
+       mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+       page = virt_to_page((void *)((unsigned long) entry & mask));
        return page->index + offset;
 }
 
index 7e45d13816c183e46962e75c3dc4817d72d5b223..ba44c9f5534633a2da3133714f6b018ba6a69545 100644 (file)
@@ -22,8 +22,8 @@
  * skb_copy_bits takes 4 parameters:
  *   %r2 = skb pointer
  *   %r3 = offset into skb data
- *   %r4 = length to copy
- *   %r5 = pointer to temp buffer
+ *   %r4 = pointer to temp buffer
+ *   %r5 = length to copy
  */
 #define SKBDATA        %r8
 
@@ -44,8 +44,9 @@ ENTRY(sk_load_word)
 
 sk_load_word_slow:
        lgr     %r9,%r2                 # save %r2
-       lhi     %r4,4                   # 4 bytes
-       la      %r5,160(%r15)           # pointer to temp buffer
+       lgr     %r3,%r1                 # offset
+       la      %r4,160(%r15)           # pointer to temp buffer
+       lghi    %r5,4                   # 4 bytes
        brasl   %r14,skb_copy_bits      # get data from skb
        l       %r5,160(%r15)           # load result from temp buffer
        ltgr    %r2,%r2                 # set cc to (%r2 != 0)
@@ -69,8 +70,9 @@ ENTRY(sk_load_half)
 
 sk_load_half_slow:
        lgr     %r9,%r2                 # save %r2
-       lhi     %r4,2                   # 2 bytes
-       la      %r5,162(%r15)           # pointer to temp buffer
+       lgr     %r3,%r1                 # offset
+       la      %r4,162(%r15)           # pointer to temp buffer
+       lghi    %r5,2                   # 2 bytes
        brasl   %r14,skb_copy_bits      # get data from skb
        xc      160(2,%r15),160(%r15)
        l       %r5,160(%r15)           # load result from temp buffer
@@ -95,8 +97,9 @@ ENTRY(sk_load_byte)
 
 sk_load_byte_slow:
        lgr     %r9,%r2                 # save %r2
-       lhi     %r4,1                   # 1 bytes
-       la      %r5,163(%r15)           # pointer to temp buffer
+       lgr     %r3,%r1                 # offset
+       la      %r4,163(%r15)           # pointer to temp buffer
+       lghi    %r5,1                   # 1 byte
        brasl   %r14,skb_copy_bits      # get data from skb
        xc      160(3,%r15),160(%r15)
        l       %r5,160(%r15)           # load result from temp buffer
@@ -104,11 +107,11 @@ sk_load_byte_slow:
        lgr     %r2,%r9                 # restore %r2
        br      %r8
 
-       /* A = (*(u8 *)(skb->data+K) & 0xf) << 2 */
+       /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
 ENTRY(sk_load_byte_msh)
        llgfr   %r1,%r3                 # extend offset
        clr     %r11,%r3                # hlen < offset ?
-       jle     sk_load_byte_slow
+       jle     sk_load_byte_msh_slow
        lhi     %r12,0
        ic      %r12,0(%r1,%r10)        # get byte from skb
        nill    %r12,0x0f
@@ -118,8 +121,9 @@ ENTRY(sk_load_byte_msh)
 
 sk_load_byte_msh_slow:
        lgr     %r9,%r2                 # save %r2
-       lhi     %r4,2                   # 2 bytes
-       la      %r5,162(%r15)           # pointer to temp buffer
+       lgr     %r3,%r1                 # offset
+       la      %r4,163(%r15)           # pointer to temp buffer
+       lghi    %r5,1                   # 1 byte
        brasl   %r14,skb_copy_bits      # get data from skb
        xc      160(3,%r15),160(%r15)
        l       %r12,160(%r15)          # load result from temp buffer
index c52ac77408ca5cac7ad50b6a2a8a3113d8af30ca..bbd1981cc15007fcdb779ca201553ed28012f20d 100644 (file)
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
                EMIT4_DISP(0x88500000, K);
                break;
        case BPF_ALU | BPF_NEG: /* A = -A */
-               /* lnr %r5,%r5 */
-               EMIT2(0x1155);
+               /* lcr %r5,%r5 */
+               EMIT2(0x1355);
                break;
        case BPF_JMP | BPF_JA: /* ip += K */
                offset = addrs[i + K] + jit->start - jit->prg;
@@ -448,15 +448,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
                mask = 0x800000; /* je */
 kbranch:       /* Emit compare if the branch targets are different */
                if (filter->jt != filter->jf) {
-                       if (K <= 16383)
-                               /* chi %r5,<K> */
-                               EMIT4_IMM(0xa75e0000, K);
-                       else if (test_facility(21))
+                       if (test_facility(21))
                                /* clfi %r5,<K> */
                                EMIT6_IMM(0xc25f0000, K);
                        else
-                               /* c %r5,<d(K)>(%r13) */
-                               EMIT4_DISP(0x5950d000, EMIT_CONST(K));
+                               /* cl %r5,<d(K)>(%r13) */
+                               EMIT4_DISP(0x5550d000, EMIT_CONST(K));
                }
 branch:                if (filter->jt == filter->jf) {
                        if (filter->jt == 0)
@@ -502,8 +499,8 @@ branch:             if (filter->jt == filter->jf) {
 xbranch:       /* Emit compare if the branch targets are different */
                if (filter->jt != filter->jf) {
                        jit->seen |= SEEN_XREG;
-                       /* cr %r5,%r12 */
-                       EMIT2(0x195c);
+                       /* clr %r5,%r12 */
+                       EMIT2(0x155c);
                }
                goto branch;
        case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
index 52238983527d605914853fd5415ea39617944ffe..6860beb2a280d0a4a65a67c89ad2201b33513068 100644 (file)
@@ -114,6 +114,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 541dc610150888e706977c7944c42ab1d61d7437..a58fec9b55e016df85cdfb7c214cc385e300479c 100644 (file)
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
        } else {
                if (fault & VM_FAULT_SIGBUS)
                        do_sigbus(regs, error_code, address);
+               else if (fault & VM_FAULT_SIGSEGV)
+                       bad_area(regs, error_code, address);
                else
                        BUG();
        }
index b36365f49478c573d715816d53582e2dfb153227..9ce5afe167ff509288b21605a2f9a35f96ff36dc 100644 (file)
@@ -639,7 +639,10 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
                                       (unsigned long long)r->end,
                                       (unsigned int)r->flags);
 
-                       pci_claim_resource(dev, i);
+                       if (pci_claim_resource(dev, i) == 0)
+                               continue;
+
+                       pci_claim_bridge_resource(dev, i);
                }
        }
 
index 908e8c17c902bef419877cd1bedcc896b9627636..70d817154fe8bfd04aeaa71f45f15667f4962c23 100644 (file)
@@ -249,6 +249,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 18fcd71670959291f8ef4933e37d5bc394e98f51..4798232494294a7ece0bef232216dd4a26408d88 100644 (file)
@@ -446,6 +446,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index f33e7c7a3bf74d48232e0c9708877cb63a6a4aaa..7931eeeb649af45af45aaa49a20fa727a6aecd40 100644 (file)
@@ -776,7 +776,7 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf];
                                if (unlikely(proglen + ilen > oldproglen)) {
                                        pr_err("bpb_jit_compile fatal error\n");
                                        kfree(addrs);
-                                       module_free(NULL, image);
+                                       module_memfree(image);
                                        return;
                                }
                                memcpy(image + proglen, temp, ilen);
@@ -822,7 +822,7 @@ out:
 void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
-               module_free(NULL, fp->bpf_func);
+               module_memfree(fp->bpf_func);
 
        bpf_prog_unlock_free(fp);
 }
index 96447c9160a0697f5a756f0a2acc945c4cb3efb6..2305084c9b93b72df9f5fba6f6302037d0fb43a7 100644 (file)
@@ -74,7 +74,7 @@ error:
 
 
 /* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
 {
        vfree(module_region);
 
@@ -83,7 +83,7 @@ void module_free(struct module *mod, void *module_region)
                     0, 0, 0, NULL, NULL, 0);
 
        /*
-        * FIXME: If module_region == mod->module_init, trim exception
+        * FIXME: Add module_arch_freeing_init to trim exception
         * table entries.
         */
 }
index 565e25a98334201ee031d09381ea570a2fcbda03..0f61a73534e6d7c41ccf56ee71244f926908f6d0 100644 (file)
@@ -442,6 +442,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 87bc86821bc9b81380f46ac38557ad537fd0a442..d195a87ca542b75e919055b30a72bf60f1ba132c 100644 (file)
@@ -3,6 +3,7 @@ config UML
        default y
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_UID16
+       select HAVE_FUTEX_CMPXCHG if FUTEX
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
        select GENERIC_IO
index 5678c3571e7cb4d1572d0b16a91b0650f76095c7..209617302df89e02994b7c1e45df4340826bad05 100644 (file)
@@ -80,6 +80,8 @@ good_area:
                if (unlikely(fault & VM_FAULT_ERROR)) {
                        if (fault & VM_FAULT_OOM) {
                                goto out_of_memory;
+                       } else if (fault & VM_FAULT_SIGSEGV) {
+                               goto out;
                        } else if (fault & VM_FAULT_SIGBUS) {
                                err = -EACCES;
                                goto out;
index ba397bde79482043d46e0e90d0bd6d7e71daa110..0dc9d0144a27957d2bd2cdadf3b141a3195ccab0 100644 (file)
@@ -857,7 +857,7 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
        bool "Local APIC support on uniprocessors"
-       depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
+       depends on X86_32 && !SMP && !X86_32_NON_STANDARD
        ---help---
          A local APIC (Advanced Programmable Interrupt Controller) is an
          integrated interrupt controller in the CPU. If you have a single-CPU
@@ -868,6 +868,10 @@ config X86_UP_APIC
          performance counters), and the NMI watchdog which detects hard
          lockups.
 
+config X86_UP_APIC_MSI
+       def_bool y
+       select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
+
 config X86_UP_IOAPIC
        bool "IO-APIC support on uniprocessors"
        depends on X86_UP_APIC
index 5b016e2498f3d3250b4edf12584a524bcb79e968..3db07f30636fe40c4cfea973abb08af8adc3c13c 100644 (file)
@@ -51,6 +51,7 @@ targets += cpustr.h
 $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
        $(call if_changed,cpustr)
 endif
+clean-files += cpustr.h
 
 # ---------------------------------------------------------------------------
 
index d999398928bc81ba0957fa7af17b63016f73e54e..ad754b4411f7e42aeecd8ae41a2f67866f23d24f 100644 (file)
@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO)   := lzo
 suffix-$(CONFIG_KERNEL_LZ4)    := lz4
 
 RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
-            perl $(srctree)/arch/x86/tools/calc_run_size.pl)
+            $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
 
index dcc1c536cc212daffda92b903ea9111e36b2c64a..a950864a64dab3d558197c77bef3c56a07961494 100644 (file)
@@ -373,6 +373,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
                                  unsigned long output_len,
                                  unsigned long run_size)
 {
+       unsigned char *output_orig = output;
+
        real_mode = rmode;
 
        sanitize_boot_params(real_mode);
@@ -421,7 +423,12 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
        debug_putstr("\nDecompressing Linux... ");
        decompress(input_data, input_len, NULL, NULL, output, NULL, error);
        parse_elf(output);
-       handle_relocations(output, output_len);
+       /*
+        * 32-bit always performs relocations. 64-bit relocations are only
+        * needed if kASLR has chosen a different load address.
+        */
+       if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
+               handle_relocations(output, output_len);
        debug_putstr("done.\nBooting the kernel.\n");
        return output;
 }
index fd0f848938ccd81a165c9dff570e108c84271c4d..5a4a089e8b1fd7166e396b52917424e1d9a421b5 100644 (file)
@@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
 
 obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
 obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
 obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
@@ -46,6 +45,7 @@ endif
 ifeq ($(avx2_supported),yes)
        obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
        obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
+       obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
 endif
 
 aes-i586-y := aes-i586-asm_32.o aes_glue.o
index 2df2a0298f5ad7075bc9b214438516270dc60bb5..a916c4a611652fb97e2e6d295e1a4b333ea532bd 100644 (file)
@@ -208,7 +208,7 @@ ddq_add_8:
 
        .if (klen == KEY_128)
                .if (load_keys)
-                       vmovdqa 3*16(p_keys), xkeyA
+                       vmovdqa 3*16(p_keys), xkey4
                .endif
        .else
                vmovdqa 3*16(p_keys), xkeyA
@@ -224,7 +224,7 @@ ddq_add_8:
        add     $(16*by), p_in
 
        .if (klen == KEY_128)
-               vmovdqa 4*16(p_keys), xkey4
+               vmovdqa 4*16(p_keys), xkeyB
        .else
                .if (load_keys)
                        vmovdqa 4*16(p_keys), xkey4
@@ -234,7 +234,12 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkeyA, var_xdata, var_xdata             /* key 3 */
+               /* key 3 */
+               .if (klen == KEY_128)
+                       vaesenc xkey4, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyA, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
@@ -243,13 +248,18 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkey4, var_xdata, var_xdata             /* key 4 */
+               /* key 4 */
+               .if (klen == KEY_128)
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .else
+                       vaesenc xkey4, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
        .if (klen == KEY_128)
                .if (load_keys)
-                       vmovdqa 6*16(p_keys), xkeyB
+                       vmovdqa 6*16(p_keys), xkey8
                .endif
        .else
                vmovdqa 6*16(p_keys), xkeyB
@@ -267,12 +277,17 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkeyB, var_xdata, var_xdata             /* key 6 */
+               /* key 6 */
+               .if (klen == KEY_128)
+                       vaesenc xkey8, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
        .if (klen == KEY_128)
-               vmovdqa 8*16(p_keys), xkey8
+               vmovdqa 8*16(p_keys), xkeyB
        .else
                .if (load_keys)
                        vmovdqa 8*16(p_keys), xkey8
@@ -288,7 +303,7 @@ ddq_add_8:
 
        .if (klen == KEY_128)
                .if (load_keys)
-                       vmovdqa 9*16(p_keys), xkeyA
+                       vmovdqa 9*16(p_keys), xkey12
                .endif
        .else
                vmovdqa 9*16(p_keys), xkeyA
@@ -297,7 +312,12 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkey8, var_xdata, var_xdata             /* key 8 */
+               /* key 8 */
+               .if (klen == KEY_128)
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .else
+                       vaesenc xkey8, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
@@ -306,7 +326,12 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkeyA, var_xdata, var_xdata             /* key 9 */
+               /* key 9 */
+               .if (klen == KEY_128)
+                       vaesenc xkey12, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyA, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
@@ -412,7 +437,6 @@ ddq_add_8:
 /* main body of aes ctr load */
 
 .macro do_aes_ctrmain key_len
-
        cmp     $16, num_bytes
        jb      .Ldo_return2\key_len
 
index a225a5ca1037ede0a9fa8ebb109a43c22523ffaa..fd9f6b035b163001620d40811b728cc5e213c712 100644 (file)
@@ -931,4 +931,4 @@ module_exit(sha1_mb_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
 
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
index 0ab4f9fd268764114e3f252b07895c4bcaf63f90..3a45668f6dc38312bc9f5761214f076a144d00d4 100644 (file)
@@ -50,6 +50,7 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
 
 extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
                                  int trigger, int polarity);
+extern void (*__acpi_unregister_gsi)(u32 gsi);
 
 static inline void disable_acpi(void)
 {
index 50d033a8947db64f8ad0270fd55f23faed683c46..a94b82e8f156f3888e0ab90ac879e39dd05ccec1 100644 (file)
@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
                gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
 }
 
-#define _LDT_empty(info)                               \
+/* This intentionally ignores lm, since 32-bit apps don't have that field. */
+#define LDT_empty(info)                                        \
        ((info)->base_addr              == 0    &&      \
         (info)->limit                  == 0    &&      \
         (info)->contents               == 0    &&      \
@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
         (info)->seg_not_present        == 1    &&      \
         (info)->useable                == 0)
 
-#ifdef CONFIG_X86_64
-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
-#else
-#define LDT_empty(info) (_LDT_empty(info))
-#endif
+/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
+static inline bool LDT_zero(const struct user_desc *info)
+{
+       return (info->base_addr         == 0 &&
+               info->limit             == 0 &&
+               info->contents          == 0 &&
+               info->read_exec_only    == 0 &&
+               info->seg_32bit         == 0 &&
+               info->limit_in_pages    == 0 &&
+               info->seg_not_present   == 0 &&
+               info->useable           == 0);
+}
 
 static inline void clear_LDT(void)
 {
index 40269a2bf6f90f6761a1a27f91d013bd5ee09017..4b75d591eb5ed1e4757ef8b658d08a2ad02a84cd 100644 (file)
@@ -130,7 +130,25 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
 {
-       mpx_notify_unmap(mm, vma, start, end);
+       /*
+        * mpx_notify_unmap() goes and reads a rarely-hot
+        * cacheline in the mm_struct.  That can be expensive
+        * enough to be seen in profiles.
+        *
+        * The mpx_notify_unmap() call and its contents have been
+        * observed to affect munmap() performance on hardware
+        * where MPX is not present.
+        *
+        * The unlikely() optimizes for the fast case: no MPX
+        * in the CPU, or no MPX use in the process.  Even if
+        * we get this wrong (in the unlikely event that MPX
+        * is widely enabled on some system) the overhead of
+        * MPX itself (reading bounds tables) is expected to
+        * overwhelm the overhead of getting this unlikely()
+        * consistently wrong.
+        */
+       if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
+               mpx_notify_unmap(mm, vma, start, end);
 }
 
 #endif /* _ASM_X86_MMU_CONTEXT_H */
index e7e9682a33e90f350e0ce5f99c61480755614221..f556c4843aa18af74359dfeb2a41d39d9a2c3bb9 100644 (file)
@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
 
        /*
         * Load per CPU data from GDT.  LSL is faster than RDTSCP and
-        * works on all CPUs.
+        * works on all CPUs.  This is volatile so that it orders
+        * correctly wrt barrier() and to keep gcc from cleverly
+        * hoisting it out of the calling function.
         */
-       asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+       asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 
        return p;
 }
index 4433a4be8171b095ff56bb6699f71d67857cc6fb..b9e30daa0881b3213bb9b6be8bf2bb1803cfba7b 100644 (file)
@@ -611,20 +611,20 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 {
-       int irq;
-
-       if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-               *irqp = gsi;
-       } else {
-               mutex_lock(&acpi_ioapic_lock);
-               irq = mp_map_gsi_to_irq(gsi,
-                                       IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
-               mutex_unlock(&acpi_ioapic_lock);
-               if (irq < 0)
-                       return -1;
-               *irqp = irq;
+       int rc, irq, trigger, polarity;
+
+       rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+       if (rc == 0) {
+               trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+               polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+               irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
+               if (irq >= 0) {
+                       *irqp = irq;
+                       return 0;
+               }
        }
-       return 0;
+
+       return -1;
 }
 EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 
@@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
 {
        return _acpi_map_lsapic(handle, physid, pcpu);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
 
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
 {
 #ifdef CONFIG_ACPI_NUMA
        set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
@@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
 
        return (0);
 }
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
 #endif                         /* CONFIG_ACPI_HOTPLUG_CPU */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
index e27b49d7c922a3caaa6c45446e8bc383a86d27bc..80091ae54c2b0995ea56629a3f7e6969a484fb9b 100644 (file)
@@ -66,3 +66,4 @@ targets += capflags.c
 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
        $(call if_changed,mkcapflags)
 endif
+clean-files += capflags.c
index e2b22df964cd88fc49b4cce67023b1e85122ed66..36d99a337b49f56398ca29d900638ddcedee277b 100644 (file)
@@ -28,7 +28,7 @@ function dump_array()
                # If the /* comment */ starts with a quote string, grab that.
                VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
                [ -z "$VALUE" ] && VALUE="\"$NAME\""
-               [ "$VALUE" == '""' ] && continue
+               [ "$VALUE" = '""' ] && continue
 
                # Name is uppercase, VALUE is all lowercase
                VALUE="$(echo "$VALUE" | tr A-Z a-z)"
index a450373e8e91698dd235725e8d8f9f35b3cec7cc..939155ffdecec60628a06b2937604dd2f2f98813 100644 (file)
@@ -107,6 +107,7 @@ static struct clocksource hyperv_cs = {
        .rating         = 400, /* use this when running on Hyperv*/
        .read           = read_hv_clock,
        .mask           = CLOCKSOURCE_MASK(64),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static void __init ms_hyperv_init_platform(void)
index 944bf019b74f425e06cc465358d25b85741a5b47..498b6d967138b1fff29659e81813c77e70ff1f58 100644 (file)
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 55: /* 22nm Atom "Silvermont"                */
+       case 76: /* 14nm Atom "Airmont"                   */
        case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
index 3c895d480cd75b056ab24e686e765b70db1729f5..07398339836426eae32b55fb08fa108593cd7389 100644 (file)
@@ -568,8 +568,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 };
 
 struct event_constraint intel_slm_pebs_event_constraints[] = {
-       /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-       INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+       /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+       INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
        /* Allow all events as PEBS with no flags */
        INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
        EVENT_CONSTRAINT_END
index 673f930c700f3ae745d8a408111f072dcce00dd1..c4bb8b8e5017403b25847a97ccce42c96bba3837 100644 (file)
@@ -103,6 +103,13 @@ static struct kobj_attribute format_attr_##_var =          \
 
 #define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
 
+#define RAPL_EVENT_ATTR_STR(_name, v, str)                             \
+static struct perf_pmu_events_attr event_attr_##v = {                  \
+       .attr           = __ATTR(_name, 0444, rapl_sysfs_show, NULL),   \
+       .id             = 0,                                            \
+       .event_str      = str,                                          \
+};
+
 struct rapl_pmu {
        spinlock_t       lock;
        int              hw_unit;  /* 1/2^hw_unit Joule */
@@ -135,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
         * or use ldexp(count, -32).
         * Watts = Joules/Time delta
         */
-       return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+       return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
 }
 
 static u64 rapl_event_update(struct perf_event *event)
@@ -379,23 +386,36 @@ static struct attribute_group rapl_pmu_attr_group = {
        .attrs = rapl_pmu_attrs,
 };
 
-EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
-EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
-EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
-EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
+static ssize_t rapl_sysfs_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *page)
+{
+       struct perf_pmu_events_attr *pmu_attr = \
+               container_of(attr, struct perf_pmu_events_attr, attr);
+
+       if (pmu_attr->event_str)
+               return sprintf(page, "%s", pmu_attr->event_str);
+
+       return 0;
+}
+
+RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
+RAPL_EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
+RAPL_EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
+RAPL_EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
 
-EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
-EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
-EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
-EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
 
 /*
  * we compute in 0.23 nJ increments regardless of MSR
  */
-EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
 
 static struct attribute *rapl_events_srv_attr[] = {
        EVENT_PTR(rapl_cores),
index 10b8d3eaaf15d760468a6ad88105ab7e06cd540b..c635b8b49e931e7926efc3dc96475a8c577958e0 100644 (file)
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
        box->phys_id = phys_id;
        box->pci_dev = pdev;
        box->pmu = pmu;
-       uncore_box_init(box);
        pci_set_drvdata(pdev, box);
 
        raw_spin_lock(&uncore_box_lock);
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
                        pmu = &type->pmus[j];
                        box = *per_cpu_ptr(pmu->box, cpu);
                        /* called by uncore_cpu_init? */
-                       if (box && box->phys_id >= 0) {
-                               uncore_box_init(box);
+                       if (box && box->phys_id >= 0)
                                continue;
-                       }
 
                        for_each_online_cpu(k) {
                                exist = *per_cpu_ptr(pmu->box, k);
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
                                }
                        }
 
-                       if (box) {
+                       if (box)
                                box->phys_id = phys_id;
-                               uncore_box_init(box);
-                       }
                }
        }
        return 0;
index 18eb78bbdd1003a5f7d1d8b302b608405214741f..6c8c1e7e69d85d3ad217eada0f0e55573c3daaf0 100644 (file)
@@ -17,7 +17,7 @@
 #define UNCORE_PCI_DEV_TYPE(data)      ((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data)       (data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV           0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX       2
+#define UNCORE_EXTRA_PCI_DEV_MAX       3
 
 /* support up to 8 sockets */
 #define UNCORE_SOCKET_MAX              8
@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
        return box->pmu->type->num_counters;
 }
 
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+               if (box->pmu->type->ops->init_box)
+                       box->pmu->type->ops->init_box(box);
+       }
+}
+
 static inline void uncore_disable_box(struct intel_uncore_box *box)
 {
        if (box->pmu->type->ops->disable_box)
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
 
 static inline void uncore_enable_box(struct intel_uncore_box *box)
 {
+       uncore_box_init(box);
+
        if (box->pmu->type->ops->enable_box)
                box->pmu->type->ops->enable_box(box);
 }
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
        return box->pmu->type->ops->read_counter(box, event);
 }
 
-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
-       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
-               if (box->pmu->type->ops->init_box)
-                       box->pmu->type->ops->init_box(box);
-       }
-}
-
 static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 {
        return (box->phys_id < 0);
index 745b158e9a65768134caaba91d2f55f43200a481..21af6149edf2e79dd462a7e8f4994c8fd201fa0f 100644 (file)
@@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void)
 enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
+       HSWEP_PCI_PCU_3,
 };
 
 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void)
 {
        if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+
+       /* Detect 6-8 core systems with only two SBOXes */
+       if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
+               u32 capid4;
+
+               pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
+                                     0x94, &capid4);
+               if (((capid4 >> 6) & 0x3) == 0)
+                       hswep_uncore_sbox.num_boxes = 2;
+       }
+
        uncore_msr_uncores = hswep_msr_uncores;
 }
 
@@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
+       { /* PCU.3 (for Capability registers) */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  HSWEP_PCI_PCU_3),
+       },
        { /* end: all zeroes */ }
 };
 
index 2142376dc8c6ce0a2ea8c5da5e42016545005d8f..8b7b0a51e742cd26defe12b535f37a865ee3c172 100644 (file)
@@ -674,7 +674,7 @@ static inline void *alloc_tramp(unsigned long size)
 }
 static inline void tramp_free(void *tramp)
 {
-       module_free(NULL, tramp);
+       module_memfree(tramp);
 }
 #else
 /* Trampolines can only be created if modules are supported */
index 6307a0f0cf17abc93aad4490c8bcdaccfdb29541..705ef8d48e2dc464936672fb54eea908f8f03b4e 100644 (file)
@@ -127,7 +127,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        seq_puts(p, "  Machine check polls\n");
 #endif
 #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
-       seq_printf(p, "%*s: ", prec, "THR");
+       seq_printf(p, "%*s: ", prec, "HYP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
        seq_puts(p, "  Hypervisor callback interrupts\n");
index f7e3cd50ece02a7b0408683d113bbafca8d49479..98f654d466e585167153e58811902675bfeb5baa 100644 (file)
@@ -1020,6 +1020,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        regs->flags &= ~X86_EFLAGS_IF;
        trace_hardirqs_off();
        regs->ip = (unsigned long)(jp->entry);
+
+       /*
+        * jprobes use jprobe_return() which skips the normal return
+        * path of the function, and this messes up the accounting of the
+        * function graph tracer to get messed up.
+        *
+        * Pause function graph tracing while performing the jprobe function.
+        */
+       pause_graph_tracing();
        return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -1048,24 +1057,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        u8 *addr = (u8 *) (regs->ip - 1);
        struct jprobe *jp = container_of(p, struct jprobe, kp);
+       void *saved_sp = kcb->jprobe_saved_sp;
 
        if ((addr > (u8 *) jprobe_return) &&
            (addr < (u8 *) jprobe_return_end)) {
-               if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+               if (stack_addr(regs) != saved_sp) {
                        struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
                        printk(KERN_ERR
                               "current sp %p does not match saved sp %p\n",
-                              stack_addr(regs), kcb->jprobe_saved_sp);
+                              stack_addr(regs), saved_sp);
                        printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
                        show_regs(saved_regs);
                        printk(KERN_ERR "Current registers\n");
                        show_regs(regs);
                        BUG();
                }
+               /* It's OK to start function graph tracing again */
+               unpause_graph_tracing();
                *regs = kcb->jprobe_saved_regs;
-               memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
-                      kcb->jprobes_stack,
-                      MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+               memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
                preempt_enable_no_resched();
                return 1;
        }
index e309cc5c276eaf7b2a9fa01020f14007b166875f..781861cc5ee8d7b9bbd27e9b13c380da59bb06c0 100644 (file)
@@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task)
 {
        return PERF_SAMPLE_REGS_ABI_32;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
 #else /* CONFIG_X86_64 */
 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
                       (1ULL << PERF_REG_X86_ES) | \
@@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task)
        else
                return PERF_SAMPLE_REGS_ABI_64;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       struct pt_regs *user_regs = task_pt_regs(current);
+
+       /*
+        * If we're in an NMI that interrupted task_pt_regs setup, then
+        * we can't sample user regs at all.  This check isn't really
+        * sufficient, though, as we could be in an NMI inside an interrupt
+        * that happened during task_pt_regs setup.
+        */
+       if (regs->sp > (unsigned long)&user_regs->r11 &&
+           regs->sp <= (unsigned long)(user_regs + 1)) {
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
+               regs_user->regs = NULL;
+               return;
+       }
+
+       /*
+        * RIP, flags, and the argument registers are usually saved.
+        * orig_ax is probably okay, too.
+        */
+       regs_user_copy->ip = user_regs->ip;
+       regs_user_copy->cx = user_regs->cx;
+       regs_user_copy->dx = user_regs->dx;
+       regs_user_copy->si = user_regs->si;
+       regs_user_copy->di = user_regs->di;
+       regs_user_copy->r8 = user_regs->r8;
+       regs_user_copy->r9 = user_regs->r9;
+       regs_user_copy->r10 = user_regs->r10;
+       regs_user_copy->r11 = user_regs->r11;
+       regs_user_copy->orig_ax = user_regs->orig_ax;
+       regs_user_copy->flags = user_regs->flags;
+
+       /*
+        * Don't even try to report the "rest" regs.
+        */
+       regs_user_copy->bx = -1;
+       regs_user_copy->bp = -1;
+       regs_user_copy->r12 = -1;
+       regs_user_copy->r13 = -1;
+       regs_user_copy->r14 = -1;
+       regs_user_copy->r15 = -1;
+
+       /*
+        * For this to be at all useful, we need a reasonable guess for
+        * sp and the ABI.  Be careful: we're in NMI context, and we're
+        * considering current to be the current task, so we should
+        * be careful not to look at any other percpu variables that might
+        * change during context switches.
+        */
+       if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
+           task_thread_info(current)->status & TS_COMPAT) {
+               /* Easy case: we're in a compat syscall. */
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
+               regs_user_copy->sp = user_regs->sp;
+               regs_user_copy->cs = user_regs->cs;
+               regs_user_copy->ss = user_regs->ss;
+       } else if (user_regs->orig_ax != -1) {
+               /*
+                * We're probably in a 64-bit syscall.
+                * Warning: this code is severely racy.  At least it's better
+                * than just blindly copying user_regs.
+                */
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
+               regs_user_copy->sp = this_cpu_read(old_rsp);
+               regs_user_copy->cs = __USER_CS;
+               regs_user_copy->ss = __USER_DS;
+               regs_user_copy->cx = -1;  /* usually contains garbage */
+       } else {
+               /* We're probably in an interrupt or exception. */
+               regs_user->abi = user_64bit_mode(user_regs) ?
+                       PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
+               regs_user_copy->sp = user_regs->sp;
+               regs_user_copy->cs = user_regs->cs;
+               regs_user_copy->ss = user_regs->ss;
+       }
+
+       regs_user->regs = regs_user_copy;
+}
 #endif /* CONFIG_X86_32 */
index 4e942f31b1a7c9401a65fb37af093caab5ad0c2e..7fc5e843f247b358288b23e459eebfefcf6631f0 100644 (file)
@@ -29,7 +29,28 @@ static int get_free_idx(void)
 
 static bool tls_desc_okay(const struct user_desc *info)
 {
-       if (LDT_empty(info))
+       /*
+        * For historical reasons (i.e. no one ever documented how any
+        * of the segmentation APIs work), user programs can and do
+        * assume that a struct user_desc that's all zeros except for
+        * entry_number means "no segment at all".  This never actually
+        * worked.  In fact, up to Linux 3.19, a struct user_desc like
+        * this would create a 16-bit read-write segment with base and
+        * limit both equal to zero.
+        *
+        * That was close enough to "no segment at all" until we
+        * hardened this function to disallow 16-bit TLS segments.  Fix
+        * it up by interpreting these zeroed segments the way that they
+        * were almost certainly intended to be interpreted.
+        *
+        * The correct way to ask for "no segment at all" is to specify
+        * a user_desc that satisfies LDT_empty.  To keep everything
+        * working, we accept both.
+        *
+        * Note that there's a similar kludge in modify_ldt -- look at
+        * the distinction between modes 1 and 0x11.
+        */
+       if (LDT_empty(info) || LDT_zero(info))
                return true;
 
        /*
@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
        cpu = get_cpu();
 
        while (n-- > 0) {
-               if (LDT_empty(info))
+               if (LDT_empty(info) || LDT_zero(info))
                        desc->a = desc->b = 0;
                else
                        fill_ldt(desc, info);
index b7e50bba3bbbb98066fac741d826e5f6f4d7e946..505449700e0cf4e66ea6284135482ac172fe756a 100644 (file)
@@ -617,7 +617,7 @@ static unsigned long quick_pit_calibrate(void)
                        goto success;
                }
        }
-       pr_err("Fast TSC calibration failed\n");
+       pr_info("Fast TSC calibration failed\n");
        return 0;
 
 success:
index 169b09d76ddd83d3033d93d2b7eace6fada2331e..de12c1d379f16899645d96a2c3fd75663919c86d 100644 (file)
@@ -2348,7 +2348,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
         * Not recognized on AMD in compat mode (but is recognized in legacy
         * mode).
         */
-       if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
+       if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
            && !vendor_intel(ctxt))
                return emulate_ud(ctxt);
 
@@ -2359,25 +2359,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
        setup_syscalls_segments(ctxt, &cs, &ss);
 
        ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
-       switch (ctxt->mode) {
-       case X86EMUL_MODE_PROT32:
-               if ((msr_data & 0xfffc) == 0x0)
-                       return emulate_gp(ctxt, 0);
-               break;
-       case X86EMUL_MODE_PROT64:
-               if (msr_data == 0x0)
-                       return emulate_gp(ctxt, 0);
-               break;
-       default:
-               break;
-       }
+       if ((msr_data & 0xfffc) == 0x0)
+               return emulate_gp(ctxt, 0);
 
        ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
-       cs_sel = (u16)msr_data;
-       cs_sel &= ~SELECTOR_RPL_MASK;
+       cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
        ss_sel = cs_sel + 8;
-       ss_sel &= ~SELECTOR_RPL_MASK;
-       if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
+       if (efer & EFER_LMA) {
                cs.d = 0;
                cs.l = 1;
        }
@@ -2386,10 +2374,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
        ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
        ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
-       ctxt->_eip = msr_data;
+       ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
 
        ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
-       *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
+       *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
+                                                             (u32)msr_data;
 
        return X86EMUL_CONTINUE;
 }
@@ -3791,8 +3780,8 @@ static const struct opcode group5[] = {
 };
 
 static const struct opcode group6[] = {
-       DI(Prot       sldt),
-       DI(Prot       str),
+       DI(Prot | DstMem,       sldt),
+       DI(Prot | DstMem,       str),
        II(Prot | Priv | SrcMem16, em_lldt, lldt),
        II(Prot | Priv | SrcMem16, em_ltr, ltr),
        N, N, N, N,
index 4f0c0b954686cbf5e980f761b5b9bd4f7bc2df9a..d52dcf0776ea930df81ded94ed22af0b9d11e48b 100644 (file)
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
                u16 cid, lid;
                u32 ldr, aid;
 
+               if (!kvm_apic_present(vcpu))
+                       continue;
+
                aid = kvm_apic_id(apic);
                ldr = kvm_apic_get_reg(apic, APIC_LDR);
                cid = apic_cluster_id(new, ldr);
index 2480978b31cc29e5d34cd54bbd05394eeee4b484..1313ae6b478b6c439741ee032a8c33b86868ee2c 100644 (file)
@@ -28,7 +28,7 @@
 
 /* Verify next sizeof(t) bytes can be on the same instruction */
 #define validate_next(t, insn, n)      \
-       ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr)
+       ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
 
 #define __get_next(t, insn)    \
        ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
index 38dcec403b46ae5b33c5967e6740faa8378e1380..e3ff27a5b6348ffb2dcff6f592abafe48b6b6396 100644 (file)
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
                if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
                             VM_FAULT_HWPOISON_LARGE))
                        do_sigbus(regs, error_code, address, fault);
+               else if (fault & VM_FAULT_SIGSEGV)
+                       bad_area_nosemaphore(regs, error_code, address);
                else
                        BUG();
        }
index a97ee0801475a2e25df60f336c552408fe1d2b63..079c3b6a3ff181277a7cb4270895f27d9a1d6f8b 100644 (file)
@@ -43,7 +43,7 @@ uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
        [_PAGE_CACHE_MODE_WT]           = _PAGE_PCD,
        [_PAGE_CACHE_MODE_WP]           = _PAGE_PCD,
 };
-EXPORT_SYMBOL_GPL(__cachemode2pte_tbl);
+EXPORT_SYMBOL(__cachemode2pte_tbl);
 uint8_t __pte2cachemode_tbl[8] = {
        [__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
        [__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
@@ -54,7 +54,7 @@ uint8_t __pte2cachemode_tbl[8] = {
        [__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
 };
-EXPORT_SYMBOL_GPL(__pte2cachemode_tbl);
+EXPORT_SYMBOL(__pte2cachemode_tbl);
 
 static unsigned long __initdata pgt_buf_start;
 static unsigned long __initdata pgt_buf_end;
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
 static unsigned long __init get_new_step_size(unsigned long step_size)
 {
        /*
-        * Explain why we shift by 5 and why we don't have to worry about
-        * 'step_size << 5' overflowing:
-        *
-        * initial mapped size is PMD_SIZE (2M).
+        * Initial mapped size is PMD_SIZE (2M).
         * We can not set step_size to be PUD_SIZE (1G) yet.
         * In worse case, when we cross the 1G boundary, and
         * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
-        * to map 1G range with PTE. Use 5 as shift for now.
+        * to map 1G range with PTE. Hence we use one less than the
+        * difference of page table level shifts.
         *
-        * Don't need to worry about overflow, on 32bit, when step_size
-        * is 0, round_down() returns 0 for start, and that turns it
-        * into 0x100000000ULL.
+        * Don't need to worry about overflow in the top-down case, on 32bit,
+        * when step_size is 0, round_down() returns 0 for start, and that
+        * turns it into 0x100000000ULL.
+        * In the bottom-up case, round_up(x, 0) returns 0 though too, which
+        * needs to be taken into consideration by the code below.
         */
-       return step_size << 5;
+       return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
 }
 
 /**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
        unsigned long step_size;
        unsigned long addr;
        unsigned long mapped_ram_size = 0;
-       unsigned long new_mapped_ram_size;
 
        /* xen has big range in reserved near end of ram, skip it at first.*/
        addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
                                start = map_start;
                } else
                        start = map_start;
-               new_mapped_ram_size = init_range_memory_mapping(start,
+               mapped_ram_size += init_range_memory_mapping(start,
                                                        last_start);
                last_start = start;
                min_pfn_mapped = last_start >> PAGE_SHIFT;
-               /* only increase step_size after big range get mapped */
-               if (new_mapped_ram_size > mapped_ram_size)
+               if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
-               mapped_ram_size += new_mapped_ram_size;
        }
 
        if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
 static void __init memory_map_bottom_up(unsigned long map_start,
                                        unsigned long map_end)
 {
-       unsigned long next, new_mapped_ram_size, start;
+       unsigned long next, start;
        unsigned long mapped_ram_size = 0;
        /* step_size need to be small so pgt_buf from BRK could cover it */
        unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
         * for page table.
         */
        while (start < map_end) {
-               if (map_end - start > step_size) {
+               if (step_size && map_end - start > step_size) {
                        next = round_up(start + 1, step_size);
                        if (next > map_end)
                                next = map_end;
-               } else
+               } else {
                        next = map_end;
+               }
 
-               new_mapped_ram_size = init_range_memory_mapping(start, next);
+               mapped_ram_size += init_range_memory_mapping(start, next);
                start = next;
 
-               if (new_mapped_ram_size > mapped_ram_size)
+               if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
-               mapped_ram_size += new_mapped_ram_size;
        }
 }
 
index 67ebf57512229a4a29bceda04324c177a587232f..c439ec47821601c5b594bc1eec5abc529c5fd012 100644 (file)
@@ -348,6 +348,12 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
        if (!cpu_feature_enabled(X86_FEATURE_MPX))
                return MPX_INVALID_BOUNDS_DIR;
 
+       /*
+        * 32-bit binaries on 64-bit kernels are currently
+        * unsupported.
+        */
+       if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
+               return MPX_INVALID_BOUNDS_DIR;
        /*
         * The bounds directory pointer is stored in a register
         * only accessible if we first do an xsave.
index edf299c8ff6c774dea8116092a41b15102d9cf1c..7ac68698406c3b35e5ce0b0e98c73c5441e869a3 100644 (file)
@@ -234,8 +234,13 @@ void pat_init(void)
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
 
        /* Boot CPU check */
-       if (!boot_pat_state)
+       if (!boot_pat_state) {
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
+               if (!boot_pat_state) {
+                       pat_disable("PAT read returns always zero, disabled.");
+                       return;
+               }
+       }
 
        wrmsrl(MSR_IA32_CR_PAT, pat);
 
index 7b20bccf3648dfb0fcb534f0290c023e12a44f9a..2fb384724ebb52d1cf0ba6131b418e81609ca55c 100644 (file)
@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
                },
        },
+        {
+                .callback = set_scan_all,
+                .ident = "Stratus/NEC ftServer",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
+                },
+        },
+        {
+                .callback = set_scan_all,
+                .ident = "Stratus/NEC ftServer",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
+                },
+        },
        {}
 };
 
index 9b18ef315a559bca66ba227da77ac2d329475f19..349c0d32cc0b140222141cfec5fa29a8c6ddbace 100644 (file)
@@ -216,7 +216,7 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
                        continue;
                if (r->parent)  /* Already allocated */
                        continue;
-               if (!r->start || pci_claim_resource(dev, idx) < 0) {
+               if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
                        /*
                         * Something is wrong with the region.
                         * Invalidate the resource to prevent
index c489ef2c1a3915a0b22952520d89c82958a718c9..9098d880c476cf842598d32805ed25ac37fc9ae1 100644 (file)
@@ -458,6 +458,7 @@ int __init pci_xen_hvm_init(void)
         * just how GSIs get registered.
         */
        __acpi_register_gsi = acpi_register_gsi_xen_hvm;
+       __acpi_unregister_gsi = NULL;
 #endif
 
 #ifdef CONFIG_PCI_MSI
@@ -471,52 +472,6 @@ int __init pci_xen_hvm_init(void)
 }
 
 #ifdef CONFIG_XEN_DOM0
-static __init void xen_setup_acpi_sci(void)
-{
-       int rc;
-       int trigger, polarity;
-       int gsi = acpi_sci_override_gsi;
-       int irq = -1;
-       int gsi_override = -1;
-
-       if (!gsi)
-               return;
-
-       rc = acpi_get_override_irq(gsi, &trigger, &polarity);
-       if (rc) {
-               printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
-                               " sci, rc=%d\n", rc);
-               return;
-       }
-       trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
-       polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-
-       printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
-                       "polarity=%d\n", gsi, trigger, polarity);
-
-       /* Before we bind the GSI to a Linux IRQ, check whether
-        * we need to override it with bus_irq (IRQ) value. Usually for
-        * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
-        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
-        * but there are oddballs where the IRQ != GSI:
-        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
-        * which ends up being: gsi_to_irq[9] == 20
-        * (which is what acpi_gsi_to_irq ends up calling when starting the
-        * the ACPI interpreter and keels over since IRQ 9 has not been
-        * setup as we had setup IRQ 20 for it).
-        */
-       if (acpi_gsi_to_irq(gsi, &irq) == 0) {
-               /* Use the provided value if it's valid. */
-               if (irq >= 0)
-                       gsi_override = irq;
-       }
-
-       gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
-       printk(KERN_INFO "xen: acpi sci %d\n", gsi);
-
-       return;
-}
-
 int __init pci_xen_initial_domain(void)
 {
        int irq;
@@ -527,8 +482,8 @@ int __init pci_xen_initial_domain(void)
        x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
        pci_msi_ignore_mask = 1;
 #endif
-       xen_setup_acpi_sci();
        __acpi_register_gsi = acpi_register_gsi_xen;
+       __acpi_unregister_gsi = NULL;
        /* Pre-allocate legacy irqs */
        for (irq = 0; irq < nr_legacy_irqs(); irq++) {
                int trigger, polarity;
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
deleted file mode 100644 (file)
index 23210ba..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/perl
-#
-# Calculate the amount of space needed to run the kernel, including room for
-# the .bss and .brk sections.
-#
-# Usage:
-# objdump -h a.out | perl calc_run_size.pl
-use strict;
-
-my $mem_size = 0;
-my $file_offset = 0;
-
-my $sections=" *[0-9]+ \.(?:bss|brk) +";
-while (<>) {
-       if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
-               my $size = hex($1);
-               my $offset = hex($2);
-               $mem_size += $size;
-               if ($file_offset == 0) {
-                       $file_offset = $offset;
-               } elsif ($file_offset != $offset) {
-                       # BFD linker shows the same file offset in ELF.
-                       # Gold linker shows them as consecutive.
-                       next if ($file_offset + $mem_size == $offset + $size);
-
-                       printf STDERR "file_offset: 0x%lx\n", $file_offset;
-                       printf STDERR "mem_size: 0x%lx\n", $mem_size;
-                       printf STDERR "offset: 0x%lx\n", $offset;
-                       printf STDERR "size: 0x%lx\n", $size;
-
-                       die ".bss and .brk are non-contiguous\n";
-               }
-       }
-}
-
-if ($file_offset == 0) {
-       die "Never found .bss or .brk file offset\n";
-}
-printf("%d\n", $mem_size + $file_offset);
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
new file mode 100644 (file)
index 0000000..1a4c17b
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Calculate the amount of space needed to run the kernel, including room for
+# the .bss and .brk sections.
+#
+# Usage:
+# objdump -h a.out | sh calc_run_size.sh
+
+NUM='\([0-9a-fA-F]*[ \t]*\)'
+OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
+if [ -z "$OUT" ] ; then
+       echo "Never found .bss or .brk file offset" >&2
+       exit 1
+fi
+
+OUT=$(echo ${OUT# })
+sizeA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+sizeB=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetB=$(printf "%d" 0x${OUT%% *})
+
+run_size=$(( $offsetA + $sizeA + $sizeB ))
+
+# BFD linker shows the same file offset in ELF.
+if [ "$offsetA" -ne "$offsetB" ] ; then
+       # Gold linker shows them as consecutive.
+       endB=$(( $offsetB + $sizeB ))
+       if [ "$endB" != "$run_size" ] ; then
+               printf "sizeA: 0x%x\n" $sizeA >&2
+               printf "offsetA: 0x%x\n" $offsetA >&2
+               printf "sizeB: 0x%x\n" $sizeB >&2
+               printf "offsetB: 0x%x\n" $offsetB >&2
+               echo ".bss and .brk are non-contiguous" >&2
+               exit 1
+       fi
+fi
+
+printf "%d\n" $run_size
+exit 0
index 531d4269e2e3c5303e8b40e6753dd20ddab3c405..bd16d6c370ec9aaeb3328779e277de8129ef61f3 100644 (file)
@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
 
 extern asmlinkage void sys_ni_syscall(void);
 
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
        /*
         * Smells like a compiler bug -- it doesn't work
         * when the & below is removed.
index 20c3649d06915cce37ea62b5ee8c9a6d704c0a62..5cdfa9db22175ed8dc327465b4cf033a7a65d3bc 100644 (file)
@@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void);
 
 extern void sys_ni_syscall(void);
 
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
        /*
         * Smells like a compiler bug -- it doesn't work
         * when the & below is removed.
index 009495b9ab4bc52c0927accd29fe0b673cda5f7e..1c9f750c38592c7278c95d7f8dbe6e1a88835c0c 100644 (file)
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
        unsigned long addr, end;
        unsigned offset;
-       end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+       /*
+        * Round up the start address.  It can start out unaligned as a result
+        * of stack start randomization.
+        */
+       start = PAGE_ALIGN(start);
+
+       /* Round the lowest possible end address up to a PMD boundary. */
+       end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
-       /* This loses some more bits than a modulo, but is cheaper */
-       offset = get_random_int() & (PTRS_PER_PTE - 1);
-       addr = start + (offset << PAGE_SHIFT);
-       if (addr >= end)
-               addr = end;
+
+       if (end > start) {
+               offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+               addr = start + (offset << PAGE_SHIFT);
+       } else {
+               addr = start;
+       }
 
        /*
-        * page-align it here so that get_unmapped_area doesn't
-        * align it wrongfully again to the next page. addr can come in 4K
-        * unaligned here as a result of stack start randomization.
+        * Forcibly align the final address in case we have a hardware
+        * issue that requires alignment for performance reasons.
         */
-       addr = PAGE_ALIGN(addr);
        addr = align_vdso_addr(addr);
 
        return addr;
index 6bf3a13e3e0f7af10c8d984f829a512661d12c2e..78a881b7fc415e16f50f16e4381e968370f4fd9f 100644 (file)
@@ -40,6 +40,7 @@
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
 #include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
 #include <xen/interface/xen-mca.h>
 #include <xen/features.h>
 #include <xen/page.h>
@@ -66,6 +67,7 @@
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
        .emergency_restart = xen_emergency_restart,
 };
 
+static unsigned char xen_get_nmi_reason(void)
+{
+       unsigned char reason = 0;
+
+       /* Construct a value which looks like it came from port 0x61. */
+       if (test_bit(_XEN_NMIREASON_io_error,
+                    &HYPERVISOR_shared_info->arch.nmi_reason))
+               reason |= NMI_REASON_IOCHK;
+       if (test_bit(_XEN_NMIREASON_pci_serr,
+                    &HYPERVISOR_shared_info->arch.nmi_reason))
+               reason |= NMI_REASON_SERR;
+
+       return reason;
+}
+
 static void __init xen_boot_params_init_edd(void)
 {
 #if IS_ENABLED(CONFIG_EDD)
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
        pv_apic_ops = xen_apic_ops;
-       if (!xen_pvh_domain())
+       if (!xen_pvh_domain()) {
                pv_cpu_ops = xen_cpu_ops;
 
+               x86_platform.get_nmi_reason = xen_get_nmi_reason;
+       }
+
        if (xen_feature(XENFEAT_auto_translated_physmap))
                x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
        else
index edbc7a63fd737f0ca13edac752e1f06341f9dd97..70fb5075c901f5b0c370478b156288764f609ad8 100644 (file)
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
        return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
 }
 
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
 {
-       BUG_ON(!slab_is_available());
+       if (unlikely(!slab_is_available())) {
+               free_bootmem((unsigned long)p, PAGE_SIZE);
+               return;
+       }
+
        free_page((unsigned long)p);
 }
 
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
                        p2m_missing_pte : p2m_identity_pte;
                for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
                        pmdp = populate_extra_pmd(
-                               (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+                               (unsigned long)(p2m + pfn) + i * PMD_SIZE);
                        set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
                }
        }
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
  * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
  * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
  */
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 {
        pte_t *ptechk;
-       pte_t *pteret = ptep;
        pte_t *pte_newpg[PMDS_PER_MID_PAGE];
        pmd_t *pmdp;
        unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
                if (ptechk == pte_pg) {
                        set_pmd(pmdp,
                                __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
-                       if (vaddr == (addr & ~(PMD_SIZE - 1)))
-                               pteret = pte_offset_kernel(pmdp, addr);
                        pte_newpg[i] = NULL;
                }
 
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
                vaddr += PMD_SIZE;
        }
 
-       return pteret;
+       return lookup_address(addr, &level);
 }
 
 /*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
 
        if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
                /* PMD level is missing, allocate a new one */
-               ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+               ptep = alloc_p2m_pmd(addr, pte_pg);
                if (!ptep)
                        return false;
        }
index dfd77dec8e2b7c1ef72d16adb1ecf7f7b80c88de..865e56cea7a0abe4d9b6feb2e1d0da27957d47e8 100644 (file)
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
 {
        int i;
-       unsigned long addr = PFN_PHYS(pfn);
+       phys_addr_t addr = PFN_PHYS(pfn);
 
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
        int i;
 
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+               if (!xen_extra_mem[i].size)
+                       continue;
                pfn_s = PFN_DOWN(xen_extra_mem[i].start);
                pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
                for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-       unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
-       unsigned long *released)
+       unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
 {
-       unsigned long len = 0;
        unsigned long pfn, end;
        int ret;
 
        WARN_ON(start_pfn > end_pfn);
 
+       /* Release pages first. */
        end = min(end_pfn, nr_pages);
        for (pfn = start_pfn; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 
                if (ret == 1) {
+                       (*released)++;
                        if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                                break;
-                       len++;
                } else
                        break;
        }
 
-       /* Need to release pages first */
-       *released += len;
-       *identity += set_phys_range_identity(start_pfn, end_pfn);
+       set_phys_range_identity(start_pfn, end_pfn);
 }
 
 /*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
        }
 
        /* Update kernel mapping, but not for highmem. */
-       if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+       if (pfn >= PFN_UP(__pa(high_memory - 1)))
                return;
 
        if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long ident_pfn_iter, remap_pfn_iter;
        unsigned long ident_end_pfn = start_pfn + size;
        unsigned long left = size;
-       unsigned long ident_cnt = 0;
        unsigned int i, chunk;
 
        WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
                xen_remap_mfn = mfn;
 
                /* Set identity map */
-               ident_cnt += set_phys_range_identity(ident_pfn_iter,
-                       ident_pfn_iter + chunk);
+               set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
 
                left -= chunk;
        }
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
 static unsigned long __init xen_set_identity_and_remap_chunk(
         const struct e820entry *list, size_t map_size, unsigned long start_pfn,
        unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
-       unsigned long *identity, unsigned long *released)
+       unsigned long *released, unsigned long *remapped)
 {
        unsigned long pfn;
        unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                /* Do not remap pages beyond the current allocation */
                if (cur_pfn >= nr_pages) {
                        /* Identity map remaining pages */
-                       *identity += set_phys_range_identity(cur_pfn,
-                               cur_pfn + size);
+                       set_phys_range_identity(cur_pfn, cur_pfn + size);
                        break;
                }
                if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                if (!remap_range_size) {
                        pr_warning("Unable to find available pfn range, not remapping identity pages\n");
                        xen_set_identity_and_release_chunk(cur_pfn,
-                               cur_pfn + left, nr_pages, identity, released);
+                               cur_pfn + left, nr_pages, released);
                        break;
                }
                /* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                /* Update variables to reflect new mappings. */
                i += size;
                remap_pfn += size;
-               *identity += size;
+               *remapped += size;
        }
 
        /*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 
 static void __init xen_set_identity_and_remap(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages,
-       unsigned long *released)
+       unsigned long *released, unsigned long *remapped)
 {
        phys_addr_t start = 0;
-       unsigned long identity = 0;
        unsigned long last_pfn = nr_pages;
        const struct e820entry *entry;
        unsigned long num_released = 0;
+       unsigned long num_remapped = 0;
        int i;
 
        /*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
                                last_pfn = xen_set_identity_and_remap_chunk(
                                                list, map_size, start_pfn,
                                                end_pfn, nr_pages, last_pfn,
-                                               &identity, &num_released);
+                                               &num_released, &num_remapped);
                        start = end;
                }
        }
 
        *released = num_released;
+       *remapped = num_remapped;
 
-       pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
        pr_info("Released %ld page(s)\n", num_released);
 }
 
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long extra_pages = 0;
+       unsigned long remapped_pages;
        int i;
        int op;
 
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
         * underlying RAM.
         */
        xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-                                  &xen_released_pages);
+                                  &xen_released_pages, &remapped_pages);
 
        extra_pages += xen_released_pages;
+       extra_pages += remapped_pages;
 
        /*
         * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
index f473d268d387fcdc8f237153b378508ec0c03f56..69087341d9aed7860dfd95549c0a8af5ffbf1ed6 100644 (file)
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
 
 struct xen_clock_event_device {
        struct clock_event_device evt;
-       char *name;
+       char name[16];
 };
 static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
 
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
        if (evt->irq >= 0) {
                unbind_from_irqhandler(evt->irq, NULL);
                evt->irq = -1;
-               kfree(per_cpu(xen_clock_events, cpu).name);
-               per_cpu(xen_clock_events, cpu).name = NULL;
        }
 }
 
 void xen_setup_timer(int cpu)
 {
-       char *name;
-       struct clock_event_device *evt;
+       struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+       struct clock_event_device *evt = &xevt->evt;
        int irq;
 
-       evt = &per_cpu(xen_clock_events, cpu).evt;
        WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
        if (evt->irq >= 0)
                xen_teardown_timer(cpu);
 
        printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
-       name = kasprintf(GFP_KERNEL, "timer%d", cpu);
-       if (!name)
-               name = "<timer kasprintf failed>";
+       snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
 
        irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
                                      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
                                      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
-                                     name, NULL);
+                                     xevt->name, NULL);
        (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 
        memcpy(evt, xen_clockevent, sizeof(*evt));
 
        evt->cpumask = cpumask_of(cpu);
        evt->irq = irq;
-       per_cpu(xen_clock_events, cpu).name = name;
 }
 
 
 void xen_setup_cpu_clockevents(void)
 {
-       BUG_ON(preemptible());
-
        clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
 }
 
index b57c4f91f487efdc6b2f3f44fe43cfb12e9a7e0c..9e3571a6535c3b1bbc8535195ee40405fe9c42c0 100644 (file)
@@ -117,6 +117,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 30f6153a40c27c7154dd453301807c7d39d1451c..3ad405571dcc5105a52da4284477a187db936f64 100644 (file)
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+       if (q->mq_ops)
+               blk_mq_wake_waiters(q);
+       else {
+               struct request_list *rl;
+
+               blk_queue_for_each_rl(rl, q) {
+                       if (rl->rq_pool) {
+                               wake_up(&rl->wait[BLK_RW_SYNC]);
+                               wake_up(&rl->wait[BLK_RW_ASYNC]);
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
        /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
-       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+       blk_set_queue_dying(q);
        spin_lock_irq(lock);
 
        /*
index 32e8dbb9ad1c49f0078e57fae100f0e6a7eb8a73..60c9d4a93fe470ced7471cd8653d8a00fc8922d7 100644 (file)
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
  */
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
 
                wake_index = bt_index_inc(wake_index);
        }
+
+       if (include_reserve) {
+               bt = &tags->breserved_tags;
+               if (waitqueue_active(&bt->bs[0].wait))
+                       wake_up(&bt->bs[0].wait);
+       }
 }
 
 /*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
        atomic_dec(&tags->active_queues);
 
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
 }
 
 /*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
         * static and should never need resizing.
         */
        bt_update_count(&tags->bitmap_tags, tdepth);
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
        return 0;
 }
 
index 6206ed17ef766714b655a715ffbc05fd34b0463a..a6fa0fc9d41a2e91c8bb4ce29bb2b1a0d952c8ed 100644 (file)
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 enum {
        BLK_MQ_TAG_CACHE_MIN    = 1,
index da1ab5641227b670faac42a84fde7e223668a4d8..2390c5541e71fb09c3353004224d76b4c81bddc8 100644 (file)
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        bool freeze;
 
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
                blk_mq_run_queues(q, false);
        }
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
        blk_mq_freeze_queue_wait(q);
 }
 
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
 {
        bool wake;
 
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
                wake_up_all(&q->mq_freeze_wq);
        }
 }
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+
+       queue_for_each_hw_ctx(q, hctx, i)
+               if (blk_mq_hw_queue_mapped(hctx))
+                       blk_mq_tag_wakeup_all(hctx->tags, true);
+
+       /*
+        * If we are called because the queue has now been marked as
+        * dying, we need to ensure that processes currently waiting on
+        * the queue are notified as well.
+        */
+       wake_up_all(&q->mq_freeze_wq);
+}
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
-       if (!rq)
+       if (!rq) {
+               blk_mq_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
+       }
        return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+int blk_mq_request_started(struct request *rq)
+{
+       return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
 void blk_mq_start_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+       cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
        kblockd_schedule_work(&q->requeue_work);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+       unsigned long flags;
+       LIST_HEAD(rq_list);
+
+       spin_lock_irqsave(&q->requeue_lock, flags);
+       list_splice_init(&q->requeue_list, &rq_list);
+       spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+       while (!list_empty(&rq_list)) {
+               struct request *rq;
+
+               rq = list_first_entry(&rq_list, struct request, queuelist);
+               list_del_init(&rq->queuelist);
+               rq->errors = -EIO;
+               blk_mq_end_request(rq, rq->errors);
+       }
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
 static inline bool is_flush_request(struct request *rq,
                struct blk_flush_queue *fq, unsigned int tag)
 {
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
                break;
        }
 }
-               
+
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
 {
        struct blk_mq_timeout_data *data = priv;
 
-       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+               /*
+                * If a request wasn't started before the queue was
+                * marked dying, kill it here or it'll go unnoticed.
+                */
+               if (unlikely(blk_queue_dying(rq->q))) {
+                       rq->errors = -EIO;
+                       blk_mq_complete_request(rq);
+               }
+               return;
+       }
+       if (rq->cmd_flags & REQ_NO_TIMEOUT)
                return;
 
        if (time_after_eq(jiffies, rq->deadline)) {
@@ -1577,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
+       queue_for_each_hw_ctx(q, hctx, i)
                free_cpumask_var(hctx->cpumask);
-               kfree(hctx);
-       }
 }
 
 static int blk_mq_init_hctx(struct request_queue *q,
@@ -1601,7 +1663,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
        hctx->queue = q;
        hctx->queue_num = hctx_idx;
        hctx->flags = set->flags;
-       hctx->cmd_size = set->cmd_size;
 
        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                        blk_mq_hctx_notify, hctx);
@@ -1806,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
        mutex_unlock(&set->tag_list_lock);
 }
 
+/*
+ * It is the actual release handler for mq, but we do it from
+ * request queue's release handler for avoiding use-after-free
+ * and headache because q->mq_kobj shouldn't have been introduced,
+ * but we can't group ctx/kctx kobj without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+
+       /* hctx kobj stays in hctx */
+       queue_for_each_hw_ctx(q, hctx, i)
+               kfree(hctx);
+
+       kfree(q->queue_hw_ctx);
+
+       /* ctx kobj stays in queue_ctx */
+       free_percpu(q->queue_ctx);
+}
+
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
        struct blk_mq_hw_ctx **hctxs;
@@ -1939,12 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
 
        percpu_ref_exit(&q->mq_usage_counter);
 
-       free_percpu(q->queue_ctx);
-       kfree(q->queue_hw_ctx);
        kfree(q->mq_map);
 
-       q->queue_ctx = NULL;
-       q->queue_hw_ctx = NULL;
        q->mq_map = NULL;
 
        mutex_lock(&all_q_mutex);
index 206230e64f7915e642ce7306aec8b949deca43b1..6a48c4c0d8a2a6efb881ea29b772df3bba9d5540 100644 (file)
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
                struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
@@ -61,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
 
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
+void blk_mq_release(struct request_queue *q);
+
 /*
  * Basic implementation of sparser bitmap, allowing the user to spread
  * the bits over more cachelines.
index 935ea2aa0730289a6de653aa28a53a4132ef5368..faaf36ade7ebdc2fdd363f174978bfb5683a4f9a 100644 (file)
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
 
        if (!q->mq_ops)
                blk_free_flush_queue(q->fq);
+       else
+               blk_mq_release(q);
 
        blk_trace_shutdown(q);
 
index 56c025894cdf2d73f78c5346c9c5987d5deb0e37..246dfb16c3d988c4f84749065a66977b825c98b5 100644 (file)
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
        struct request_queue *q = req->q;
        unsigned long expiry;
 
+       if (req->cmd_flags & REQ_NO_TIMEOUT)
+               return;
+
        /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
        if (!q->mq_ops && !q->rq_timed_out_fn)
                return;
index 9b3c54c1cbe826a8cb031a9affb9079f0961d1c4..3dd101144a58def9c38adae3cdf21f810cdbe0aa 100644 (file)
@@ -1475,3 +1475,4 @@ module_exit(aes_fini);
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-generic");
index 1fa7bc31be63b9fb774782ebe3fbdaed0b16ff72..eb78fe8a60c8e175af9b5e976510997eb87777c9 100644 (file)
@@ -338,49 +338,31 @@ static const struct net_proto_family alg_family = {
        .owner  =       THIS_MODULE,
 };
 
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
-                  int write)
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
 {
-       unsigned long from = (unsigned long)addr;
-       unsigned long npages;
-       unsigned off;
-       int err;
-       int i;
-
-       err = -EFAULT;
-       if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
-               goto out;
+       size_t off;
+       ssize_t n;
+       int npages, i;
 
-       off = from & ~PAGE_MASK;
-       npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (npages > ALG_MAX_PAGES)
-               npages = ALG_MAX_PAGES;
+       n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
+       if (n < 0)
+               return n;
 
-       err = get_user_pages_fast(from, npages, write, sgl->pages);
-       if (err < 0)
-               goto out;
-
-       npages = err;
-       err = -EINVAL;
+       npages = PAGE_ALIGN(off + n);
        if (WARN_ON(npages == 0))
-               goto out;
-
-       err = 0;
+               return -EINVAL;
 
        sg_init_table(sgl->sg, npages);
 
-       for (i = 0; i < npages; i++) {
+       for (i = 0, len = n; i < npages; i++) {
                int plen = min_t(int, len, PAGE_SIZE - off);
 
                sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
 
                off = 0;
                len -= plen;
-               err += plen;
        }
-
-out:
-       return err;
+       return n;
 }
 EXPORT_SYMBOL_GPL(af_alg_make_sg);
 
@@ -455,6 +437,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
 {
        struct af_alg_completion *completion = req->data;
 
+       if (err == -EINPROGRESS)
+               return;
+
        completion->err = err;
        complete(&completion->completion);
 }
index 01f56eb7816ee4320a9acd9632c3e4b5bc3233ee..01da360bdb5510b78eac0ee43630795c4a011d76 100644 (file)
@@ -41,8 +41,6 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct hash_ctx *ctx = ask->private;
-       unsigned long iovlen;
-       const struct iovec *iov;
        long copied = 0;
        int err;
 
@@ -58,37 +56,28 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
 
        ctx->more = 0;
 
-       for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
-            iovlen--, iov++) {
-               unsigned long seglen = iov->iov_len;
-               char __user *from = iov->iov_base;
+       while (iov_iter_count(&msg->msg_iter)) {
+               int len = iov_iter_count(&msg->msg_iter);
 
-               while (seglen) {
-                       int len = min_t(unsigned long, seglen, limit);
-                       int newlen;
+               if (len > limit)
+                       len = limit;
 
-                       newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
-                       if (newlen < 0) {
-                               err = copied ? 0 : newlen;
-                               goto unlock;
-                       }
-
-                       ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
-                                               newlen);
-
-                       err = af_alg_wait_for_completion(
-                               crypto_ahash_update(&ctx->req),
-                               &ctx->completion);
+               len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
+               if (len < 0) {
+                       err = copied ? 0 : len;
+                       goto unlock;
+               }
 
-                       af_alg_free_sg(&ctx->sgl);
+               ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
 
-                       if (err)
-                               goto unlock;
+               err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
+                                                &ctx->completion);
+               af_alg_free_sg(&ctx->sgl);
+               if (err)
+                       goto unlock;
 
-                       seglen -= newlen;
-                       from += newlen;
-                       copied += newlen;
-               }
+               copied += len;
+               iov_iter_advance(&msg->msg_iter, len);
        }
 
        err = 0;
index c12207c8dde9e6b6a5365783f299623e7a221914..37110fd68adfecb84a2b78663c937f6567106da1 100644 (file)
@@ -426,67 +426,59 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
                &ctx->req));
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
-       unsigned long iovlen;
-       const struct iovec *iov;
        int err = -EAGAIN;
        int used;
        long copied = 0;
 
        lock_sock(sk);
-       for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
-            iovlen--, iov++) {
-               unsigned long seglen = iov->iov_len;
-               char __user *from = iov->iov_base;
-
-               while (seglen) {
-                       sgl = list_first_entry(&ctx->tsgl,
-                                              struct skcipher_sg_list, list);
-                       sg = sgl->sg;
-
-                       while (!sg->length)
-                               sg++;
-
-                       if (!ctx->used) {
-                               err = skcipher_wait_for_data(sk, flags);
-                               if (err)
-                                       goto unlock;
-                       }
+       while (iov_iter_count(&msg->msg_iter)) {
+               sgl = list_first_entry(&ctx->tsgl,
+                                      struct skcipher_sg_list, list);
+               sg = sgl->sg;
 
-                       used = min_t(unsigned long, ctx->used, seglen);
+               while (!sg->length)
+                       sg++;
 
-                       used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
-                       err = used;
-                       if (err < 0)
+               used = ctx->used;
+               if (!used) {
+                       err = skcipher_wait_for_data(sk, flags);
+                       if (err)
                                goto unlock;
+               }
+
+               used = min_t(unsigned long, used, iov_iter_count(&msg->msg_iter));
+
+               used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
+               err = used;
+               if (err < 0)
+                       goto unlock;
 
-                       if (ctx->more || used < ctx->used)
-                               used -= used % bs;
+               if (ctx->more || used < ctx->used)
+                       used -= used % bs;
 
-                       err = -EINVAL;
-                       if (!used)
-                               goto free;
+               err = -EINVAL;
+               if (!used)
+                       goto free;
 
-                       ablkcipher_request_set_crypt(&ctx->req, sg,
-                                                    ctx->rsgl.sg, used,
-                                                    ctx->iv);
+               ablkcipher_request_set_crypt(&ctx->req, sg,
+                                            ctx->rsgl.sg, used,
+                                            ctx->iv);
 
-                       err = af_alg_wait_for_completion(
+               err = af_alg_wait_for_completion(
                                ctx->enc ?
                                        crypto_ablkcipher_encrypt(&ctx->req) :
                                        crypto_ablkcipher_decrypt(&ctx->req),
                                &ctx->completion);
 
 free:
-                       af_alg_free_sg(&ctx->rsgl);
+               af_alg_free_sg(&ctx->rsgl);
 
-                       if (err)
-                               goto unlock;
+               if (err)
+                       goto unlock;
 
-                       copied += used;
-                       from += used;
-                       seglen -= used;
-                       skcipher_pull_sgl(sk, used);
-               }
+               copied += used;
+               skcipher_pull_sgl(sk, used);
+               iov_iter_advance(&msg->msg_iter, used);
        }
 
        err = 0;
index b4485a108389a2f13b0ca28949e4f6b932818277..6f5bebc9bf01ebea38bca6dd616c6bd2c3ce2111 100644 (file)
@@ -477,3 +477,4 @@ MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
 module_init(prng_mod_init);
 module_exit(prng_mod_fini);
 MODULE_ALIAS_CRYPTO("stdrng");
+MODULE_ALIAS_CRYPTO("ansi_cprng");
index 7bd71f02d0dde233939716f3b0059cc758ab788c..87b392a77a9395a9e4164b7e9356e739c9f96455 100644 (file)
@@ -139,3 +139,4 @@ module_exit(blowfish_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
 MODULE_ALIAS_CRYPTO("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish-generic");
index 1b74c5a3e8910741cac8c92e292b041eff40e714..a02286bf319ea3cc0b52fd21079506f0667715b3 100644 (file)
@@ -1099,3 +1099,4 @@ module_exit(camellia_fini);
 MODULE_DESCRIPTION("Camellia Cipher Algorithm");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-generic");
index 84c86db67ec7a88a85fd92a93ad07af6eb935564..df5c72629383d99b9fa1c030112b8e0bfc3fbd96 100644 (file)
@@ -550,3 +550,4 @@ module_exit(cast5_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
 MODULE_ALIAS_CRYPTO("cast5");
+MODULE_ALIAS_CRYPTO("cast5-generic");
index f408f0bd8de2525ac369ae68c4bd5a5187b22e1d..058c8d755d0366532a7e824b7e22681a40624429 100644 (file)
@@ -292,3 +292,4 @@ module_exit(cast6_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
 MODULE_ALIAS_CRYPTO("cast6");
+MODULE_ALIAS_CRYPTO("cast6-generic");
index 2a062025749d925f858939933ebe67283f158562..06f1b60f02b223eeea70c000ec4055c470d4eeae 100644 (file)
@@ -171,4 +171,5 @@ MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
 MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CRYPTO("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c-generic");
 MODULE_SOFTDEP("pre: crc32c");
index 08bb4f50452085b65c0ed263a84f5c8298142149..c1229614c7e324e5ee9341d6f2be530afd437487 100644 (file)
@@ -125,3 +125,4 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
 MODULE_DESCRIPTION("T10 DIF CRC calculation.");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CRYPTO("crct10dif");
+MODULE_ALIAS_CRYPTO("crct10dif-generic");
index 42912948776b1426ec71f2e6fe3d5debd3d67e72..a71720544d118f0134b5301924fe9ab0c572eda7 100644 (file)
@@ -983,8 +983,6 @@ static struct crypto_alg des_algs[2] = { {
        .cia_decrypt            =       des3_ede_decrypt } }
 } };
 
-MODULE_ALIAS_CRYPTO("des3_ede");
-
 static int __init des_generic_mod_init(void)
 {
        return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
@@ -1001,4 +999,7 @@ module_exit(des_generic_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
 MODULE_AUTHOR("Dag Arne Osvik <da@osvik.no>");
-MODULE_ALIAS("des");
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des-generic");
+MODULE_ALIAS_CRYPTO("des3_ede");
+MODULE_ALIAS_CRYPTO("des3_ede-generic");
index 4e97fae9666f6fd549235ea60c93f999ad00699c..bac70995e0640a49fbc56797c4f7b605791ff98b 100644 (file)
@@ -173,3 +173,4 @@ module_exit(ghash_mod_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
 MODULE_ALIAS_CRYPTO("ghash");
+MODULE_ALIAS_CRYPTO("ghash-generic");
index 67c88b3312107c7c16e9732fa9ffba38172629f4..0224841b6579aa8a915406f7c3a944385c6fcbd6 100644 (file)
@@ -63,3 +63,4 @@ module_exit(krng_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Kernel Random Number Generator");
 MODULE_ALIAS_CRYPTO("stdrng");
+MODULE_ALIAS_CRYPTO("krng");
index 3d0f9df30ac9fe368baa63598db9426c2cd8657a..f550b5d9463074b16670129341de59e069f8509c 100644 (file)
@@ -249,3 +249,4 @@ module_exit(salsa20_generic_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
 MODULE_ALIAS_CRYPTO("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20-generic");
index a53b5e2af335c95d046b85c0162dd0a5bb25e5e4..94970a794975ac2148fbc0d84bf2e830719070da 100644 (file)
@@ -667,3 +667,4 @@ MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Ci
 MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
 MODULE_ALIAS_CRYPTO("tnepres");
 MODULE_ALIAS_CRYPTO("serpent");
+MODULE_ALIAS_CRYPTO("serpent-generic");
index 039e58cfa155655f42aec3ddcb8d2761aa22b264..a3e50c37eb6f8f670e4e7da64b62d05ebc2b56d8 100644 (file)
@@ -154,3 +154,4 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
 
 MODULE_ALIAS_CRYPTO("sha1");
+MODULE_ALIAS_CRYPTO("sha1-generic");
index 5eb21b1200333e95c73f11d3343183c37331544c..b001ff5c2efcec0d657fd33c2273be4980db690d 100644 (file)
@@ -385,4 +385,6 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
 
 MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-generic");
 MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-generic");
index 8d0b19ed4f4b3fb90df2266132f5877a488b1e1c..1c3c3767e079af825e3f53b779cca7f8c416b257 100644 (file)
@@ -289,4 +289,6 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
 
 MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha384-generic");
 MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha512-generic");
index 495be2d0077d4a2828323d2d9ec187964cd74948..b70b441c7d1e7e6135f000fa8fa58a3057671b20 100644 (file)
@@ -270,6 +270,7 @@ static void __exit tea_mod_fini(void)
        crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
 }
 
+MODULE_ALIAS_CRYPTO("tea");
 MODULE_ALIAS_CRYPTO("xtea");
 MODULE_ALIAS_CRYPTO("xeta");
 
index 6e5651c66cf8a783b235e1f8551154e8e01641de..321bc6ff2a9d1ff714b3f27e49b68f0d48ce17e0 100644 (file)
@@ -676,6 +676,7 @@ static void __exit tgr192_mod_fini(void)
        crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
 }
 
+MODULE_ALIAS_CRYPTO("tgr192");
 MODULE_ALIAS_CRYPTO("tgr160");
 MODULE_ALIAS_CRYPTO("tgr128");
 
index 523ad8c4e35918329cc08ef979d58a678f52dc5d..ebf7a3efb572715750c9529b8f54e9e724b6c5e7 100644 (file)
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
 MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-generic");
index 0de42eb3d0400b895de0cf8e70e1015dd137ff87..7ee5a043a988350770c3fe712f45b4f3995063c9 100644 (file)
@@ -1167,6 +1167,7 @@ static void __exit wp512_mod_fini(void)
        crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
 }
 
+MODULE_ALIAS_CRYPTO("wp512");
 MODULE_ALIAS_CRYPTO("wp384");
 MODULE_ALIAS_CRYPTO("wp256");
 
index 694d5a70d6ce16d195301aa634c5ace2b1a81ec2..c70d6e45dc1029a8be0422bd21ca979b97e0365e 100644 (file)
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
 
 source "drivers/platform/Kconfig"
 
-source "drivers/soc/Kconfig"
-
 source "drivers/clk/Kconfig"
 
 source "drivers/hwspinlock/Kconfig"
index 67d2334dc41ecd571f9eb0dfd907cc87104912a9..527a6da8d539ad2abb84c5397e0087c1914501af 100644 (file)
@@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER)       += reset/
 obj-y                          += tty/
 obj-y                          += char/
 
-# gpu/ comes after char for AGP vs DRM startup
+# iommu/ comes before gpu as gpu are using iommu controllers
+obj-$(CONFIG_IOMMU_SUPPORT)    += iommu/
+
+# gpu/ comes after char for AGP vs DRM startup and after iommu
 obj-y                          += gpu/
 
 obj-$(CONFIG_CONNECTOR)                += connector/
@@ -141,7 +144,6 @@ obj-y                               += clk/
 
 obj-$(CONFIG_MAILBOX)          += mailbox/
 obj-$(CONFIG_HWSPINLOCK)       += hwspinlock/
-obj-$(CONFIG_IOMMU_SUPPORT)    += iommu/
 obj-$(CONFIG_REMOTEPROC)       += remoteproc/
 obj-$(CONFIG_RPMSG)            += rpmsg/
 
index 4f3febf8a58954b2ca8ced8c057106529ab1f30b..e75737fd7eefbc80de3dc8731fcbec94ab321f89 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * ACPI support for Intel Lynxpoint LPSS.
  *
- * Copyright (C) 2013, 2014, Intel Corporation
+ * Copyright (C) 2013, Intel Corporation
  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
  *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_CLK_DIVIDER               BIT(2)
 #define LPSS_LTR                       BIT(3)
 #define LPSS_SAVE_CTX                  BIT(4)
-#define LPSS_DEV_PROXY                 BIT(5)
-#define LPSS_PROXY_REQ                 BIT(6)
 
 struct lpss_private_data;
 
@@ -72,10 +70,8 @@ struct lpss_device_desc {
        void (*setup)(struct lpss_private_data *pdata);
 };
 
-static struct device *proxy_device;
-
 static struct lpss_device_desc lpss_dma_desc = {
-       .flags = LPSS_CLK | LPSS_PROXY_REQ,
+       .flags = LPSS_CLK,
 };
 
 struct lpss_private_data {
@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 };
 
 static struct lpss_device_desc byt_uart_dev_desc = {
-       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-                LPSS_DEV_PROXY,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
 };
 
 static struct lpss_device_desc byt_spi_dev_desc = {
-       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-                LPSS_DEV_PROXY,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .prv_offset = 0x400,
 };
 
 static struct lpss_device_desc byt_sdio_dev_desc = {
-       .flags = LPSS_CLK | LPSS_DEV_PROXY,
+       .flags = LPSS_CLK,
 };
 
 static struct lpss_device_desc byt_i2c_dev_desc = {
-       .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY,
+       .flags = LPSS_CLK | LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = byt_i2c_setup,
 };
@@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        adev->driver_data = pdata;
        pdev = acpi_create_platform_device(adev);
        if (!IS_ERR_OR_NULL(pdev)) {
-               if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
-                       proxy_device = &pdev->dev;
                return 1;
        }
 
@@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                acpi_lpss_save_ctx(dev, pdata);
 
-       ret = acpi_dev_runtime_suspend(dev);
-       if (ret)
-               return ret;
-
-       if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
-               return pm_runtime_put_sync_suspend(proxy_device);
-
-       return 0;
+       return acpi_dev_runtime_suspend(dev);
 }
 
 static int acpi_lpss_runtime_resume(struct device *dev)
@@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev)
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;
 
-       if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
-               ret = pm_runtime_get_sync(proxy_device);
-               if (ret)
-                       return ret;
-       }
-
        ret = acpi_dev_runtime_resume(dev);
        if (ret)
                return ret;
index 1fdf5e07a1c7cb0440594b12f8b78408c1c25bd4..1020b1b53a174e58111056e2c3e88089d0180d83 100644 (file)
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        acpi_status status;
        int ret;
 
-       if (pr->apic_id == -1)
+       if (pr->phys_id == -1)
                return -ENODEV;
 
        status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        cpu_maps_update_begin();
        cpu_hotplug_begin();
 
-       ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
+       ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
        if (ret)
                goto out;
 
        ret = arch_register_cpu(pr->id);
        if (ret) {
-               acpi_unmap_lsapic(pr->id);
+               acpi_unmap_cpu(pr->id);
                goto out;
        }
 
@@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        struct acpi_processor *pr = acpi_driver_data(device);
-       int apic_id, cpu_index, device_declaration = 0;
+       int phys_id, cpu_index, device_declaration = 0;
        acpi_status status = AE_OK;
        static int cpu0_initialized;
        unsigned long long value;
@@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
                pr->acpi_id = value;
        }
 
-       apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
-       if (apic_id < 0)
-               acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
-       pr->apic_id = apic_id;
+       phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
+       if (phys_id < 0)
+               acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
+       pr->phys_id = phys_id;
 
-       cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+       cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
        if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
                cpu0_initialized = 1;
-               /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+               /*
+                * Handle UP system running SMP kernel, with no CPU
+                * entry in MADT
+                */
                if ((cpu_index == -1) && (num_online_cpus() == 1))
                        cpu_index = 0;
        }
@@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
 
        /* Remove the CPU. */
        arch_unregister_cpu(pr->id);
-       acpi_unmap_lsapic(pr->id);
+       acpi_unmap_cpu(pr->id);
 
        cpu_hotplug_done();
        cpu_maps_update_done();
index c2daa85fc9f70fa5aca61a6f2a4fbaa6b85df100..c0d44d394ca39c63f87f212f0345d0c05d9acdc4 100644 (file)
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
 
        device->power.state = ACPI_STATE_UNKNOWN;
        if (!acpi_device_is_present(device))
-               return 0;
+               return -ENXIO;
 
        result = acpi_device_get_power(device, &state);
        if (result)
index ef2d730734dcca90a9690e48021d1d9a1d925791..e24ea4e796e4b920cdc437f3f76a69eb391b85c7 100644 (file)
@@ -100,7 +100,6 @@ int acpi_bus_generate_netlink_event(const char *device_class,
        struct acpi_genl_event *event;
        void *msg_header;
        int size;
-       int result;
 
        /* allocate memory */
        size = nla_total_size(sizeof(struct acpi_genl_event)) +
@@ -137,11 +136,7 @@ int acpi_bus_generate_netlink_event(const char *device_class,
        event->data = data;
 
        /* send multicast genetlink message */
-       result = genlmsg_end(skb, msg_header);
-       if (result < 0) {
-               nlmsg_free(skb);
-               return result;
-       }
+       genlmsg_end(skb, msg_header);
 
        genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC);
        return 0;
index a27d31d1ba24afcd176d1151aff62da8350e54b4..9dcf83682e367e889db67cd5ae44670878893459 100644 (file)
 
 #include "internal.h"
 
-#define DO_ENUMERATION 0x01
+#define INT3401_DEVICE 0X01
 static const struct acpi_device_id int340x_thermal_device_ids[] = {
-       {"INT3400", DO_ENUMERATION },
-       {"INT3401"},
+       {"INT3400"},
+       {"INT3401", INT3401_DEVICE},
        {"INT3402"},
        {"INT3403"},
        {"INT3404"},
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
                                        const struct acpi_device_id *id)
 {
 #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
-       if (id->driver_data == DO_ENUMERATION)
+       acpi_create_platform_device(adev);
+#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
+       /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+       if (id->driver_data == INT3401_DEVICE)
                acpi_create_platform_device(adev);
 #endif
        return 1;
index 5277a0ee57042b26cf78e6186a17899233801273..b1def411c0b89cbf7847b767063c5c2ab528e8a8 100644 (file)
@@ -512,7 +512,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
        dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
        if (gsi >= 0) {
                acpi_unregister_gsi(gsi);
-               dev->irq = 0;
                dev->irq_managed = 0;
        }
 }
index 342942f90a1031a3650306144d6858bb58a79b08..02e48394276c785aa84c72fcacf231b4b6cc4587 100644 (file)
@@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
        unsigned long madt_end, entry;
        static struct acpi_table_madt *madt;
        static int read_madt;
-       int apic_id = -1;
+       int phys_id = -1;       /* CPU hardware ID */
 
        if (!read_madt) {
                if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
@@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
        }
 
        if (!madt)
-               return apic_id;
+               return phys_id;
 
        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;
@@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-                       if (!map_lapic_id(header, acpi_id, &apic_id))
+                       if (!map_lapic_id(header, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-                       if (!map_x2apic_id(header, type, acpi_id, &apic_id))
+                       if (!map_x2apic_id(header, type, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-                       if (!map_lsapic_id(header, type, acpi_id, &apic_id))
+                       if (!map_lsapic_id(header, type, acpi_id, &phys_id))
                                break;
                }
                entry += header->length;
        }
-       return apic_id;
+       return phys_id;
 }
 
 static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
@@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
-       int apic_id = -1;
+       int phys_id = -1;
 
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;
@@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 
        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-               map_lapic_id(header, acpi_id, &apic_id);
+               map_lapic_id(header, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-               map_lsapic_id(header, type, acpi_id, &apic_id);
+               map_lsapic_id(header, type, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-               map_x2apic_id(header, type, acpi_id, &apic_id);
+               map_x2apic_id(header, type, acpi_id, &phys_id);
 
 exit:
        kfree(buffer.pointer);
-       return apic_id;
+       return phys_id;
 }
 
-int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
 {
-       int apic_id;
+       int phys_id;
 
-       apic_id = map_mat_entry(handle, type, acpi_id);
-       if (apic_id == -1)
-               apic_id = map_madt_entry(type, acpi_id);
+       phys_id = map_mat_entry(handle, type, acpi_id);
+       if (phys_id == -1)
+               phys_id = map_madt_entry(type, acpi_id);
 
-       return apic_id;
+       return phys_id;
 }
 
-int acpi_map_cpuid(int apic_id, u32 acpi_id)
+int acpi_map_cpuid(int phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
        int i;
 #endif
 
-       if (apic_id == -1) {
+       if (phys_id == -1) {
                /*
                 * On UP processor, there is no _MAT or MADT table.
-                * So above apic_id is always set to -1.
+                * So above phys_id is always set to -1.
                 *
                 * BIOS may define multiple CPU handles even for UP processor.
                 * For example,
@@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
-                * Ignores apic_id and always returns 0 for the processor
+                * Ignores phys_id and always returns 0 for the processor
                 * handle with acpi id 0 if nr_cpu_ids is 1.
                 * This should be the case if SMP tables are not found.
                 * Return -1 for other CPU's handle.
@@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
                if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
-                       return apic_id;
+                       return phys_id;
        }
 
 #ifdef CONFIG_SMP
        for_each_possible_cpu(i) {
-               if (cpu_physical_id(i) == apic_id)
+               if (cpu_physical_id(i) == phys_id)
                        return i;
        }
 #else
        /* In UP kernel, only processor 0 is valid */
-       if (apic_id == 0)
-               return apic_id;
+       if (phys_id == 0)
+               return phys_id;
 #endif
        return -1;
 }
 
 int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 {
-       int apic_id;
+       int phys_id;
 
-       apic_id = acpi_get_apicid(handle, type, acpi_id);
+       phys_id = acpi_get_phys_id(handle, type, acpi_id);
 
-       return acpi_map_cpuid(apic_id, acpi_id);
+       return acpi_map_cpuid(phys_id, acpi_id);
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
index 16914cc308822798b091d51a15f337b28486fe51..dc4d8960684a78f12978d56b911ead4f02a2074a 100644 (file)
@@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
        if (device->wakeup.flags.valid)
                acpi_power_resources_list_free(&device->wakeup.resources);
 
-       if (!device->flags.power_manageable)
+       if (!device->power.flags.power_resources)
                return;
 
        for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
                        device->power.flags.power_resources)
                device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
 
-       if (acpi_bus_init_power(device)) {
-               acpi_free_power_resources_lists(device);
+       if (acpi_bus_init_power(device))
                device->flags.power_manageable = 0;
-       }
 }
 
 static void acpi_bus_get_flags(struct acpi_device *device)
@@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
        /* Skip devices that are not present. */
        if (!acpi_device_is_present(device)) {
                device->flags.visited = false;
+               device->flags.power_manageable = 0;
                return;
        }
        if (device->handler)
                goto ok;
 
        if (!device->flags.initialized) {
-               acpi_bus_update_power(device, NULL);
+               device->flags.power_manageable =
+                       device->power.states[ACPI_STATE_D0].flags.valid;
+               if (acpi_bus_init_power(device))
+                       device->flags.power_manageable = 0;
+
                device->flags.initialized = true;
        }
        device->flags.visited = false;
index c72e79d2c5ad2559bce9f45ec9d26e99c04c8c2d..032db459370f85481ba091dc3761e727284fe0f3 100644 (file)
@@ -522,6 +522,16 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
                },
        },
+
+       {
+        /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
+        .callback = video_disable_native_backlight,
+        .ident = "Dell XPS15 L521X",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
+               },
+       },
        {}
 };
 
index a3a13605a9c42d13baed5746f9f1bfb9bc30536c..5f601553b9b043fff9ac80552ab55dbc4173c714 100644 (file)
@@ -835,6 +835,7 @@ config PATA_AT32
 config PATA_AT91
        tristate "PATA support for AT91SAM9260"
        depends on ARM && SOC_AT91SAM9
+       depends on !ARCH_MULTIPLATFORM
        help
          This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
 
index 49f1e6890587e0b7f8970fe2dbdb7d879da92871..33bb06e006c9d6cbd9689b92a08df1481b12c568 100644 (file)
@@ -325,7 +325,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
-       { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
index feeb8f1e2fe845e8f63ede5363c34b6dee64803d..cbcd2081035573e9897db0afd475224c176231d3 100644 (file)
@@ -125,10 +125,11 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
  * xgene_ahci_qc_issue - Issue commands to the device
  * @qc: Command to issue
  *
- * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot
- * clear the BSY bit after receiving the PIO setup FIS. This results in the dma
- * state machine goes into the CMFatalErrorUpdate state and locks up. By
- * restarting the dma engine, it removes the controller out of lock up state.
+ * Due to Hardware errata for IDENTIFY DEVICE command and PACKET
+ * command of ATAPI protocol set, the controller cannot clear the BSY bit
+ * after receiving the PIO setup FIS. This results in the DMA state machine
+ * going into the CMFatalErrorUpdate state and locks up. By restarting the
+ * DMA engine, it removes the controller out of lock up state.
  */
 static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
 {
@@ -137,7 +138,8 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
        struct xgene_ahci_context *ctx = hpriv->plat_data;
        int rc = 0;
 
-       if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+       if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
+           (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET)))
                xgene_ahci_restart_engine(ap);
 
        rc = ahci_qc_issue(qc);
@@ -188,7 +190,7 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
         *
         * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
         */
-       id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
+       id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
 
        return 0;
 }
index 97683e45ab043be5045ae22945a5520e845cfe4f..61a9c07e0dff5b277dba35cfa135bac449f9ce84 100644 (file)
@@ -2003,7 +2003,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
 
        devslp = readl(port_mmio + PORT_DEVSLP);
        if (!(devslp & PORT_DEVSLP_DSP)) {
-               dev_err(ap->host->dev, "port does not support device sleep\n");
+               dev_info(ap->host->dev, "port does not support device sleep\n");
                return;
        }
 
index 5c84fb5c33720d5b83ba52d407656445a1dbe0ae..d1a05f9bb91f239b6b24f115b6bade4694152698 100644 (file)
@@ -4233,10 +4233,33 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
        /* devices that don't properly handle queued TRIM commands */
-       { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
-       { "Crucial_CT???M500SSD*",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
-       { "Micron_M550*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
-       { "Crucial_CT*M550SSD*",        NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Micron_M[56]*",              NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Crucial_CT*SSD*",            NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+
+       /*
+        * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+        * (Return Zero After Trim) flags in the ATA Command Set are
+        * unreliable in the sense that they only define what happens if
+        * the device successfully executed the DSM TRIM command. TRIM
+        * is only advisory, however, and the device is free to silently
+        * ignore all or parts of the request.
+        *
+        * Whitelist drives that are known to reliably return zeroes
+        * after TRIM.
+        */
+
+       /*
+        * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
+        * that model before whitelisting all other intel SSDs.
+        */
+       { "INTEL*SSDSC2MH*",            NULL,   0, },
+
+       { "INTEL*SSD*",                 NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "SSD*INTEL*",                 NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Samsung*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "SAMSUNG*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "ST[1248][0248]0[FH]*",       NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
        /*
         * Some WD SATA-I drives spin up and down erratically when the link
@@ -4748,7 +4771,10 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
                return NULL;
 
        for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
-               tag = tag < max_queue ? tag : 0;
+               if (ap->flags & ATA_FLAG_LOWTAG)
+                       tag = i;
+               else
+                       tag = tag < max_queue ? tag : 0;
 
                /* the last tag is reserved for internal command. */
                if (tag == ATA_TAG_INTERNAL)
index 3dbec8954c867e435689c313d1b1f2f383aaea1e..8d00c2638bed8ea499fb55bcd1b8b660c80be814 100644 (file)
@@ -2389,6 +2389,7 @@ const char *ata_get_cmd_descript(u8 command)
 
        return NULL;
 }
+EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
 
 /**
  *     ata_eh_link_report - report error handling to user
index e364e86e84d75b7d4ec8f18bba0eb4fc2b21a43c..6abd17a85b1369d4515302090c6eb57a2d1d088b 100644 (file)
@@ -2532,13 +2532,15 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
                rbuf[15] = lowest_aligned;
 
                if (ata_id_has_trim(args->id)) {
-                       rbuf[14] |= 0x80; /* TPE */
+                       rbuf[14] |= 0x80; /* LBPME */
 
-                       if (ata_id_has_zero_after_trim(args->id))
-                               rbuf[14] |= 0x40; /* TPRZ */
+                       if (ata_id_has_zero_after_trim(args->id) &&
+                           dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
+                               ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+                               rbuf[14] |= 0x40; /* LBPRZ */
+                       }
                }
        }
-
        return 0;
 }
 
index db90aa35cb71e9456d176dcdd1214b760165cc4c..2e86e3b852666e3b57e55aafde26ba1c13f8c695 100644 (file)
@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
        DPRINTK("ENTER\n");
 
        cancel_delayed_work_sync(&ap->sff_pio_task);
+
+       /*
+        * We wanna reset the HSM state to IDLE.  If we do so without
+        * grabbing the port lock, critical sections protected by it which
+        * expect the HSM state to stay stable may get surprised.  For
+        * example, we may set IDLE in between the time
+        * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
+        * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
+        */
+       spin_lock_irq(ap->lock);
        ap->hsm_task_state = HSM_ST_IDLE;
+       spin_unlock_irq(ap->lock);
+
        ap->sff_pio_task_link = NULL;
 
        if (ata_msg_ctl(ap))
index c7ddef89e7b02695509a5fa3076d6953435d4b86..8e8248179d20577cf83d0270e324d1a867554554 100644 (file)
@@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
        if (err) {
                dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
                        " %d\n", __func__, err);
-               goto error_out;
+               return err;
        }
 
        /* Enabe DMA */
@@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
                sata_dma_regs);
 
        return 0;
-
-error_out:
-       dma_dwc_exit(hsdev);
-
-       return err;
 }
 
 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
@@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
        char *ver = (char *)&versionr;
        u8 *base = NULL;
        int err = 0;
-       int irq, rc;
+       int irq;
        struct ata_host *host;
        struct ata_port_info pi = sata_dwc_port_info[0];
        const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
        if (irq == NO_IRQ) {
                dev_err(&ofdev->dev, "no SATA DMA irq\n");
                err = -ENODEV;
-               goto error_out;
+               goto error_iomap;
        }
 
        /* Get physical SATA DMA register base address */
@@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
                dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
                        " address\n");
                err = -ENODEV;
-               goto error_out;
+               goto error_iomap;
        }
 
        /* Save dev for later use in dev_xxx() routines */
        host_pvt.dwc_dev = &ofdev->dev;
 
        /* Initialize AHB DMAC */
-       dma_dwc_init(hsdev, irq);
+       err = dma_dwc_init(hsdev, irq);
+       if (err)
+               goto error_dma_iomap;
 
        /* Enable SATA Interrupts */
        sata_dwc_enable_interrupts(hsdev);
@@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
         * device discovery process, invoking our port_start() handler &
         * error_handler() to execute a dummy Softreset EH session
         */
-       rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
-
-       if (rc != 0)
+       err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+       if (err)
                dev_err(&ofdev->dev, "failed to activate host");
 
        dev_set_drvdata(&ofdev->dev, host);
@@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
 error_out:
        /* Free SATA DMA resources */
        dma_dwc_exit(hsdev);
-
+error_dma_iomap:
+       iounmap((void __iomem *)host_pvt.sata_dma_regs);
 error_iomap:
        iounmap(base);
 error_kmalloc:
@@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
        /* Free SATA DMA resources */
        dma_dwc_exit(hsdev);
 
+       iounmap((void __iomem *)host_pvt.sata_dma_regs);
        iounmap(hsdev->reg_base);
        kfree(hsdev);
        kfree(host);
index d81b20ddb52736de9f9b7e15126382ebdeec2e37..ea655949023f4a6304096a33724ed2d2d8ed56c3 100644 (file)
@@ -246,7 +246,7 @@ enum {
        /* host flags */
        SIL24_COMMON_FLAGS      = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
                                  ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
-                                 ATA_FLAG_AN | ATA_FLAG_PMP,
+                                 ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG,
        SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
 
        IRQ_STAT_4PORTS         = 0xf,
index c7fab3ee14eef1e28af86e69a253925d30f491ea..6339efd326972108267e11e84048737c6af1910f 100644 (file)
@@ -354,9 +354,9 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
        eni_vcc = ENI_VCC(vcc);
        paddr = 0; /* GCC, shut up */
        if (skb) {
-               paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
-                   PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(eni_dev->pci_dev, paddr))
+               paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
+                                      DMA_FROM_DEVICE);
+               if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr))
                        goto dma_map_error;
                ENI_PRV_PADDR(skb) = paddr;
                if (paddr & 3)
@@ -481,8 +481,8 @@ rx_enqueued++;
 
 trouble:
        if (paddr)
-               pci_unmap_single(eni_dev->pci_dev,paddr,skb->len,
-                   PCI_DMA_FROMDEVICE);
+               dma_unmap_single(&eni_dev->pci_dev->dev,paddr,skb->len,
+                                DMA_FROM_DEVICE);
 dma_map_error:
        if (skb) dev_kfree_skb_irq(skb);
        return -1;
@@ -758,8 +758,8 @@ rx_dequeued++;
                }
                eni_vcc->rxing--;
                eni_vcc->rx_pos = ENI_PRV_POS(skb) & (eni_vcc->words-1);
-               pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
-                   PCI_DMA_TODEVICE);
+               dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
+                                DMA_TO_DEVICE);
                if (!skb->len) dev_kfree_skb_irq(skb);
                else {
                        EVENT("pushing (len=%ld)\n",skb->len,0);
@@ -1112,8 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
                    vcc->dev->number);
                return enq_jam;
        }
-       paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
-           PCI_DMA_TODEVICE);
+       paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
+                              DMA_TO_DEVICE);
        ENI_PRV_PADDR(skb) = paddr;
        /* prepare DMA queue entries */
        j = 0;
@@ -1226,8 +1226,8 @@ static void dequeue_tx(struct atm_dev *dev)
                        break;
                }
                ENI_VCC(vcc)->txing -= ENI_PRV_SIZE(skb);
-               pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
-                   PCI_DMA_TODEVICE);
+               dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
+                                DMA_TO_DEVICE);
                if (vcc->pop) vcc->pop(vcc,skb);
                else dev_kfree_skb_irq(skb);
                atomic_inc(&vcc->stats->tx);
@@ -2240,13 +2240,18 @@ static int eni_init_one(struct pci_dev *pci_dev,
        if (rc < 0)
                goto out;
 
+       rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+       if (rc < 0)
+               goto out;
+
        rc = -ENOMEM;
        eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
        if (!eni_dev)
                goto err_disable;
 
        zero = &eni_dev->zero;
-       zero->addr = pci_alloc_consistent(pci_dev, ENI_ZEROES_SIZE, &zero->dma);
+       zero->addr = dma_alloc_coherent(&pci_dev->dev,
+                                       ENI_ZEROES_SIZE, &zero->dma, GFP_KERNEL);
        if (!zero->addr)
                goto err_kfree;
 
@@ -2277,7 +2282,7 @@ err_eni_release:
 err_unregister:
        atm_dev_deregister(dev);
 err_free_consistent:
-       pci_free_consistent(pci_dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
+       dma_free_coherent(&pci_dev->dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
 err_kfree:
        kfree(eni_dev);
 err_disable:
@@ -2302,7 +2307,7 @@ static void eni_remove_one(struct pci_dev *pdev)
 
        eni_do_release(dev);
        atm_dev_deregister(dev);
-       pci_free_consistent(pdev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
+       dma_free_coherent(&pdev->dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
        kfree(ed);
        pci_disable_device(pdev);
 }
index d5d9eafbbfcf1f0851b6db889ccb4edf8eebfadd..75dde903b2383af33387025d7f93ac5fa0d902b4 100644 (file)
@@ -425,7 +425,7 @@ static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
 static u32
 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
 {
-    u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
+    u32 dma_addr = dma_map_single(&((struct pci_dev *) fore200e->bus_dev)->dev, virt_addr, size, direction);
 
     DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d,  --> dma_addr = 0x%08x\n",
            virt_addr, size, direction, dma_addr);
@@ -440,7 +440,7 @@ fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int di
     DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
            dma_addr, size, direction);
 
-    pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
+    dma_unmap_single(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
 }
 
 
@@ -449,7 +449,7 @@ fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size,
 {
     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
 
-    pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
+    dma_sync_single_for_cpu(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
 }
 
 static void
@@ -457,7 +457,7 @@ fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int si
 {
     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
 
-    pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
+    dma_sync_single_for_device(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
 }
 
 
@@ -470,9 +470,10 @@ fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
 {
     /* returned chunks are page-aligned */
     chunk->alloc_size = size * nbr;
-    chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
-                                            chunk->alloc_size,
-                                            &chunk->dma_addr);
+    chunk->alloc_addr = dma_alloc_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
+                                          chunk->alloc_size,
+                                          &chunk->dma_addr,
+                                          GFP_KERNEL);
     
     if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
        return -ENOMEM;
@@ -488,7 +489,7 @@ fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
 static void
 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
 {
-    pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
+    dma_free_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
                        chunk->alloc_size,
                        chunk->alloc_addr,
                        chunk->dma_addr);
@@ -2707,6 +2708,11 @@ static int fore200e_pca_detect(struct pci_dev *pci_dev,
        err = -EINVAL;
        goto out;
     }
+
+    if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
+       err = -EINVAL;
+       goto out;
+    }
     
     fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
     if (fore200e == NULL) {
index c39702bc279d44f747ae24ae07958d2899ac9bcd..93dca2e73bf5a9d696e5c169d6499a8479901ac5 100644 (file)
@@ -359,7 +359,7 @@ static int he_init_one(struct pci_dev *pci_dev,
 
        if (pci_enable_device(pci_dev))
                return -EIO;
-       if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
+       if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
                printk(KERN_WARNING "he: no suitable dma available\n");
                err = -EIO;
                goto init_one_failure;
@@ -533,9 +533,9 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
 
 static int he_init_tpdrq(struct he_dev *he_dev)
 {
-       he_dev->tpdrq_base = pci_zalloc_consistent(he_dev->pci_dev,
-                                                  CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
-                                                  &he_dev->tpdrq_phys);
+       he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+                                                CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+                                                &he_dev->tpdrq_phys, GFP_KERNEL);
        if (he_dev->tpdrq_base == NULL) {
                hprintk("failed to alloc tpdrq\n");
                return -ENOMEM;
@@ -796,16 +796,16 @@ static int he_init_group(struct he_dev *he_dev, int group)
        }
 
        /* large buffer pool */
-       he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
+       he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
                                            CONFIG_RBPL_BUFSIZE, 64, 0);
        if (he_dev->rbpl_pool == NULL) {
                hprintk("unable to create rbpl pool\n");
                goto out_free_rbpl_virt;
        }
 
-       he_dev->rbpl_base = pci_zalloc_consistent(he_dev->pci_dev,
-                                                 CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
-                                                 &he_dev->rbpl_phys);
+       he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+                                               CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+                                               &he_dev->rbpl_phys, GFP_KERNEL);
        if (he_dev->rbpl_base == NULL) {
                hprintk("failed to alloc rbpl_base\n");
                goto out_destroy_rbpl_pool;
@@ -815,7 +815,7 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
        for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
 
-               heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
+               heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
                if (!heb)
                        goto out_free_rbpl;
                heb->mapping = mapping;
@@ -842,9 +842,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
        /* rx buffer ready queue */
 
-       he_dev->rbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
-                                                 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
-                                                 &he_dev->rbrq_phys);
+       he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+                                               CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+                                               &he_dev->rbrq_phys, GFP_KERNEL);
        if (he_dev->rbrq_base == NULL) {
                hprintk("failed to allocate rbrq\n");
                goto out_free_rbpl;
@@ -866,9 +866,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
        /* tx buffer ready queue */
 
-       he_dev->tbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
-                                                 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-                                                 &he_dev->tbrq_phys);
+       he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+                                               CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+                                               &he_dev->tbrq_phys, GFP_KERNEL);
        if (he_dev->tbrq_base == NULL) {
                hprintk("failed to allocate tbrq\n");
                goto out_free_rbpq_base;
@@ -884,18 +884,18 @@ static int he_init_group(struct he_dev *he_dev, int group)
        return 0;
 
 out_free_rbpq_base:
-       pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
-                       sizeof(struct he_rbrq), he_dev->rbrq_base,
-                       he_dev->rbrq_phys);
+       dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
+                         sizeof(struct he_rbrq), he_dev->rbrq_base,
+                         he_dev->rbrq_phys);
 out_free_rbpl:
        list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
-               pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+               dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-       pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
-                       sizeof(struct he_rbp), he_dev->rbpl_base,
-                       he_dev->rbpl_phys);
+       dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
+                         sizeof(struct he_rbp), he_dev->rbpl_base,
+                         he_dev->rbpl_phys);
 out_destroy_rbpl_pool:
-       pci_pool_destroy(he_dev->rbpl_pool);
+       dma_pool_destroy(he_dev->rbpl_pool);
 out_free_rbpl_virt:
        kfree(he_dev->rbpl_virt);
 out_free_rbpl_table:
@@ -911,8 +911,11 @@ static int he_init_irq(struct he_dev *he_dev)
        /* 2.9.3.5  tail offset for each interrupt queue is located after the
                    end of the interrupt queue */
 
-       he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
-                       (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
+       he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+                                              (CONFIG_IRQ_SIZE + 1)
+                                              * sizeof(struct he_irq),
+                                              &he_dev->irq_phys,
+                                              GFP_KERNEL);
        if (he_dev->irq_base == NULL) {
                hprintk("failed to allocate irq\n");
                return -ENOMEM;
@@ -1419,10 +1422,10 @@ static int he_start(struct atm_dev *dev)
 
        he_init_tpdrq(he_dev);
 
-       he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
-               sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
+       he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
+                                          sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
        if (he_dev->tpd_pool == NULL) {
-               hprintk("unable to create tpd pci_pool\n");
+               hprintk("unable to create tpd dma_pool\n");
                return -ENOMEM;         
        }
 
@@ -1459,9 +1462,9 @@ static int he_start(struct atm_dev *dev)
 
        /* host status page */
 
-       he_dev->hsp = pci_zalloc_consistent(he_dev->pci_dev,
-                                           sizeof(struct he_hsp),
-                                           &he_dev->hsp_phys);
+       he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+                                         sizeof(struct he_hsp),
+                                         &he_dev->hsp_phys, GFP_KERNEL);
        if (he_dev->hsp == NULL) {
                hprintk("failed to allocate host status page\n");
                return -ENOMEM;
@@ -1558,41 +1561,41 @@ he_stop(struct he_dev *he_dev)
                free_irq(he_dev->irq, he_dev);
 
        if (he_dev->irq_base)
-               pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
-                       * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
+               dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
+                                 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
 
        if (he_dev->hsp)
-               pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
-                                               he_dev->hsp, he_dev->hsp_phys);
+               dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
+                                 he_dev->hsp, he_dev->hsp_phys);
 
        if (he_dev->rbpl_base) {
                list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
-                       pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+                       dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 
-               pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
-                       * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
+               dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
+                                 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
        }
 
        kfree(he_dev->rbpl_virt);
        kfree(he_dev->rbpl_table);
 
        if (he_dev->rbpl_pool)
-               pci_pool_destroy(he_dev->rbpl_pool);
+               dma_pool_destroy(he_dev->rbpl_pool);
 
        if (he_dev->rbrq_base)
-               pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
-                                                       he_dev->rbrq_base, he_dev->rbrq_phys);
+               dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+                                 he_dev->rbrq_base, he_dev->rbrq_phys);
 
        if (he_dev->tbrq_base)
-               pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-                                                       he_dev->tbrq_base, he_dev->tbrq_phys);
+               dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+                                 he_dev->tbrq_base, he_dev->tbrq_phys);
 
        if (he_dev->tpdrq_base)
-               pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-                                                       he_dev->tpdrq_base, he_dev->tpdrq_phys);
+               dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+                                 he_dev->tpdrq_base, he_dev->tpdrq_phys);
 
        if (he_dev->tpd_pool)
-               pci_pool_destroy(he_dev->tpd_pool);
+               dma_pool_destroy(he_dev->tpd_pool);
 
        if (he_dev->pci_dev) {
                pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
@@ -1610,7 +1613,7 @@ __alloc_tpd(struct he_dev *he_dev)
        struct he_tpd *tpd;
        dma_addr_t mapping;
 
-       tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+       tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
        if (tpd == NULL)
                return NULL;
                        
@@ -1681,7 +1684,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
                        if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
                                clear_bit(i, he_dev->rbpl_table);
                                list_del(&heb->entry);
-                               pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+                               dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
                        }
                                        
                        goto next_rbrq_entry;
@@ -1774,7 +1777,7 @@ return_host_buffers:
                ++pdus_assembled;
 
                list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
-                       pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+                       dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
                INIT_LIST_HEAD(&he_vcc->buffers);
                he_vcc->pdu_len = 0;
 
@@ -1843,10 +1846,10 @@ he_service_tbrq(struct he_dev *he_dev, int group)
 
                for (slot = 0; slot < TPD_MAXIOV; ++slot) {
                        if (tpd->iovec[slot].addr)
-                               pci_unmap_single(he_dev->pci_dev,
+                               dma_unmap_single(&he_dev->pci_dev->dev,
                                        tpd->iovec[slot].addr,
                                        tpd->iovec[slot].len & TPD_LEN_MASK,
-                                                       PCI_DMA_TODEVICE);
+                                                       DMA_TO_DEVICE);
                        if (tpd->iovec[slot].len & TPD_LST)
                                break;
                                
@@ -1861,7 +1864,7 @@ he_service_tbrq(struct he_dev *he_dev, int group)
 
 next_tbrq_entry:
                if (tpd)
-                       pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+                       dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
                he_dev->tbrq_head = (struct he_tbrq *)
                                ((unsigned long) he_dev->tbrq_base |
                                        TBRQ_MASK(he_dev->tbrq_head + 1));
@@ -1905,7 +1908,7 @@ he_service_rbpl(struct he_dev *he_dev, int group)
                }
                he_dev->rbpl_hint = i + 1;
 
-               heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+               heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
                if (!heb)
                        break;
                heb->mapping = mapping;
@@ -2084,10 +2087,10 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
                         */
                        for (slot = 0; slot < TPD_MAXIOV; ++slot) {
                                if (tpd->iovec[slot].addr)
-                                       pci_unmap_single(he_dev->pci_dev,
+                                       dma_unmap_single(&he_dev->pci_dev->dev,
                                                tpd->iovec[slot].addr,
                                                tpd->iovec[slot].len & TPD_LEN_MASK,
-                                                               PCI_DMA_TODEVICE);
+                                                               DMA_TO_DEVICE);
                        }
                        if (tpd->skb) {
                                if (tpd->vcc->pop)
@@ -2096,7 +2099,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
                                        dev_kfree_skb_any(tpd->skb);
                                atomic_inc(&tpd->vcc->stats->tx_err);
                        }
-                       pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+                       dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
                        return;
                }
        }
@@ -2550,8 +2553,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
        }
 
 #ifdef USE_SCATTERGATHER
-       tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
-                               skb_headlen(skb), PCI_DMA_TODEVICE);
+       tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
+                               skb_headlen(skb), DMA_TO_DEVICE);
        tpd->iovec[slot].len = skb_headlen(skb);
        ++slot;
 
@@ -2579,9 +2582,9 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
                        slot = 0;
                }
 
-               tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
+               tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
                        (void *) page_address(frag->page) + frag->page_offset,
-                               frag->size, PCI_DMA_TODEVICE);
+                               frag->size, DMA_TO_DEVICE);
                tpd->iovec[slot].len = frag->size;
                ++slot;
 
@@ -2589,7 +2592,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 
        tpd->iovec[slot - 1].len |= TPD_LST;
 #else
-       tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+       tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
        tpd->length0 = skb->len | TPD_LST;
 #endif
        tpd->status |= TPD_INT;
index 110a27d2ecfc5b4e4aa9a9a6e8762117be73db8f..f3f53674ef3fce339664b152ad0a7e7defe07f9d 100644 (file)
@@ -281,7 +281,7 @@ struct he_dev {
        int irq_peak;
 
        struct tasklet_struct tasklet;
-       struct pci_pool *tpd_pool;
+       struct dma_pool *tpd_pool;
        struct list_head outstanding_tpds;
 
        dma_addr_t tpdrq_phys;
@@ -296,7 +296,7 @@ struct he_dev {
        struct he_buff **rbpl_virt;
        unsigned long *rbpl_table;
        unsigned long rbpl_hint;
-       struct pci_pool *rbpl_pool;
+       struct dma_pool *rbpl_pool;
        dma_addr_t rbpl_phys;
        struct he_rbp *rbpl_base, *rbpl_tail;
        struct list_head rbpl_outstanding;
index 1dc0519333f291ab859b58739e762b2aa5ae5834..527bbd595e3796debc2c0e2d60ea0a8e3f504df3 100644 (file)
@@ -458,12 +458,6 @@ static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode,
     return;
 }
 
-static inline u16 query_tx_channel_config (hrz_dev * dev, short chan, u8 mode) {
-  wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
-          chan * TX_CHANNEL_CONFIG_MULT | mode);
-    return rd_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF);
-}
-
 /********** dump functions **********/
 
 static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
@@ -513,16 +507,6 @@ static inline void dump_framer (hrz_dev * dev) {
 
 /* RX channels are 10 bit integers, these fns are quite paranoid */
 
-static inline int channel_to_vpivci (const u16 channel, short * vpi, int * vci) {
-  unsigned short vci_bits = 10 - vpi_bits;
-  if ((channel & RX_CHANNEL_MASK) == channel) {
-    *vci = channel & ((~0)<<vci_bits);
-    *vpi = channel >> vci_bits;
-    return channel ? 0 : -EINVAL;
-  }
-  return -EINVAL;
-}
-
 static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
   unsigned short vci_bits = 10 - vpi_bits;
   if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
@@ -1260,14 +1244,6 @@ static u32 rx_queue_entry_next (hrz_dev * dev) {
   return rx_queue_entry;
 }
 
-/********** handle RX disabled by device **********/
-
-static inline void rx_disabled_handler (hrz_dev * dev) {
-  wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
-  // count me please
-  PRINTK (KERN_WARNING, "RX was disabled!");
-}
-
 /********** handle RX data received by device **********/
 
 // called from IRQ handler
index 2b24ed0567281fe46f94e305b75859bfbd91ea75..074616b39f4d542db8001cd6d0b1d7829dc83875 100644 (file)
@@ -641,7 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
        scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
        if (!scq)
                return NULL;
-       scq->base = pci_zalloc_consistent(card->pcidev, SCQ_SIZE, &scq->paddr);
+       scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
+                                       &scq->paddr, GFP_KERNEL);
        if (scq->base == NULL) {
                kfree(scq);
                return NULL;
@@ -669,12 +670,12 @@ free_scq(struct idt77252_dev *card, struct scq_info *scq)
        struct sk_buff *skb;
        struct atm_vcc *vcc;
 
-       pci_free_consistent(card->pcidev, SCQ_SIZE,
-                           scq->base, scq->paddr);
+       dma_free_coherent(&card->pcidev->dev, SCQ_SIZE,
+                         scq->base, scq->paddr);
 
        while ((skb = skb_dequeue(&scq->transmit))) {
-               pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                skb->len, PCI_DMA_TODEVICE);
+               dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+                                skb->len, DMA_TO_DEVICE);
 
                vcc = ATM_SKB(skb)->vcc;
                if (vcc->pop)
@@ -684,8 +685,8 @@ free_scq(struct idt77252_dev *card, struct scq_info *scq)
        }
 
        while ((skb = skb_dequeue(&scq->pending))) {
-               pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                skb->len, PCI_DMA_TODEVICE);
+               dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+                                skb->len, DMA_TO_DEVICE);
 
                vcc = ATM_SKB(skb)->vcc;
                if (vcc->pop)
@@ -800,8 +801,8 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
        if (skb) {
                TXPRINTK("%s: freeing skb at %p.\n", card->name, skb);
 
-               pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                skb->len, PCI_DMA_TODEVICE);
+               dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+                                skb->len, DMA_TO_DEVICE);
 
                vcc = ATM_SKB(skb)->vcc;
 
@@ -846,8 +847,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
        tbd = &IDT77252_PRV_TBD(skb);
        vcc = ATM_SKB(skb)->vcc;
 
-       IDT77252_PRV_PADDR(skb) = pci_map_single(card->pcidev, skb->data,
-                                                skb->len, PCI_DMA_TODEVICE);
+       IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
+                                                skb->len, DMA_TO_DEVICE);
 
        error = -EINVAL;
 
@@ -924,8 +925,8 @@ done:
        return 0;
 
 errout:
-       pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                        skb->len, PCI_DMA_TODEVICE);
+       dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+                        skb->len, DMA_TO_DEVICE);
        return error;
 }
 
@@ -970,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
 {
        struct rsq_entry *rsqe;
 
-       card->rsq.base = pci_zalloc_consistent(card->pcidev, RSQSIZE,
-                                              &card->rsq.paddr);
+       card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
+                                            &card->rsq.paddr, GFP_KERNEL);
        if (card->rsq.base == NULL) {
                printk("%s: can't allocate RSQ.\n", card->name);
                return -1;
@@ -1001,8 +1002,8 @@ init_rsq(struct idt77252_dev *card)
 static void
 deinit_rsq(struct idt77252_dev *card)
 {
-       pci_free_consistent(card->pcidev, RSQSIZE,
-                           card->rsq.base, card->rsq.paddr);
+       dma_free_coherent(&card->pcidev->dev, RSQSIZE,
+                         card->rsq.base, card->rsq.paddr);
 }
 
 static void
@@ -1057,9 +1058,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 
        vcc = vc->rx_vcc;
 
-       pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                   skb_end_pointer(skb) - skb->data,
-                                   PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+                               skb_end_pointer(skb) - skb->data,
+                               DMA_FROM_DEVICE);
 
        if ((vcc->qos.aal == ATM_AAL0) ||
            (vcc->qos.aal == ATM_AAL34)) {
@@ -1180,9 +1181,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
                        return;
                }
 
-               pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
+               dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
                                 skb_end_pointer(skb) - skb->data,
-                                PCI_DMA_FROMDEVICE);
+                                DMA_FROM_DEVICE);
                sb_pool_remove(card, skb);
 
                skb_trim(skb, len);
@@ -1254,9 +1255,9 @@ idt77252_rx_raw(struct idt77252_dev *card)
        head = IDT77252_PRV_PADDR(queue) + (queue->data - queue->head - 16);
        tail = readl(SAR_REG_RAWCT);
 
-       pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
-                                   skb_end_offset(queue) - 16,
-                                   PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(queue),
+                               skb_end_offset(queue) - 16,
+                               DMA_FROM_DEVICE);
 
        while (head != tail) {
                unsigned int vpi, vci;
@@ -1348,11 +1349,11 @@ drop:
                        if (next) {
                                card->raw_cell_head = next;
                                queue = card->raw_cell_head;
-                               pci_dma_sync_single_for_cpu(card->pcidev,
-                                                           IDT77252_PRV_PADDR(queue),
-                                                           (skb_end_pointer(queue) -
-                                                            queue->data),
-                                                           PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_cpu(&card->pcidev->dev,
+                                                       IDT77252_PRV_PADDR(queue),
+                                                       (skb_end_pointer(queue) -
+                                                        queue->data),
+                                                       DMA_FROM_DEVICE);
                        } else {
                                card->raw_cell_head = NULL;
                                printk("%s: raw cell queue overrun\n",
@@ -1375,8 +1376,8 @@ init_tsq(struct idt77252_dev *card)
 {
        struct tsq_entry *tsqe;
 
-       card->tsq.base = pci_alloc_consistent(card->pcidev, RSQSIZE,
-                                             &card->tsq.paddr);
+       card->tsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
+                                           &card->tsq.paddr, GFP_KERNEL);
        if (card->tsq.base == NULL) {
                printk("%s: can't allocate TSQ.\n", card->name);
                return -1;
@@ -1398,8 +1399,8 @@ init_tsq(struct idt77252_dev *card)
 static void
 deinit_tsq(struct idt77252_dev *card)
 {
-       pci_free_consistent(card->pcidev, TSQSIZE,
-                           card->tsq.base, card->tsq.paddr);
+       dma_free_coherent(&card->pcidev->dev, TSQSIZE,
+                         card->tsq.base, card->tsq.paddr);
 }
 
 static void
@@ -1861,9 +1862,9 @@ add_rx_skb(struct idt77252_dev *card, int queue,
                        goto outfree;
                }
 
-               paddr = pci_map_single(card->pcidev, skb->data,
+               paddr = dma_map_single(&card->pcidev->dev, skb->data,
                                       skb_end_pointer(skb) - skb->data,
-                                      PCI_DMA_FROMDEVICE);
+                                      DMA_FROM_DEVICE);
                IDT77252_PRV_PADDR(skb) = paddr;
 
                if (push_rx_skb(card, skb, queue)) {
@@ -1875,8 +1876,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
        return;
 
 outunmap:
-       pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                        skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+                        skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
 
        handle = IDT77252_PRV_POOL(skb);
        card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@@ -1892,15 +1893,15 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
        u32 handle = IDT77252_PRV_POOL(skb);
        int err;
 
-       pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                      skb_end_pointer(skb) - skb->data,
-                                      PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_device(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
+                                  skb_end_pointer(skb) - skb->data,
+                                  DMA_FROM_DEVICE);
 
        err = push_rx_skb(card, skb, POOL_QUEUE(handle));
        if (err) {
-               pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
+               dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
                                 skb_end_pointer(skb) - skb->data,
-                                PCI_DMA_FROMDEVICE);
+                                DMA_FROM_DEVICE);
                sb_pool_remove(card, skb);
                dev_kfree_skb(skb);
        }
@@ -3058,11 +3059,11 @@ deinit_card(struct idt77252_dev *card)
                for (j = 0; j < FBQ_SIZE; j++) {
                        skb = card->sbpool[i].skb[j];
                        if (skb) {
-                               pci_unmap_single(card->pcidev,
+                               dma_unmap_single(&card->pcidev->dev,
                                                 IDT77252_PRV_PADDR(skb),
                                                 (skb_end_pointer(skb) -
                                                  skb->data),
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                                card->sbpool[i].skb[j] = NULL;
                                dev_kfree_skb(skb);
                        }
@@ -3076,8 +3077,8 @@ deinit_card(struct idt77252_dev *card)
        vfree(card->vcs);
 
        if (card->raw_cell_hnd) {
-               pci_free_consistent(card->pcidev, 2 * sizeof(u32),
-                                   card->raw_cell_hnd, card->raw_cell_paddr);
+               dma_free_coherent(&card->pcidev->dev, 2 * sizeof(u32),
+                                 card->raw_cell_hnd, card->raw_cell_paddr);
        }
 
        if (card->rsq.base) {
@@ -3397,9 +3398,10 @@ static int init_card(struct atm_dev *dev)
        writel(0, SAR_REG_GP);
 
        /* Initialize RAW Cell Handle Register  */
-       card->raw_cell_hnd = pci_zalloc_consistent(card->pcidev,
-                                                  2 * sizeof(u32),
-                                                  &card->raw_cell_paddr);
+       card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
+                                                2 * sizeof(u32),
+                                                &card->raw_cell_paddr,
+                                                GFP_KERNEL);
        if (!card->raw_cell_hnd) {
                printk("%s: memory allocation failure.\n", card->name);
                deinit_card(card);
@@ -3611,6 +3613,11 @@ static int idt77252_init_one(struct pci_dev *pcidev,
                return err;
        }
 
+       if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
+               printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
+               return err;
+       }
+
        card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
        if (!card) {
                printk("idt77252-%d: can't allocate private data\n", index);
index 4217f29a85e0473b9686e428cf11c75599b13b41..924f8e26789dbb3efa4bd911f2d5f3428b6c5ecb 100644 (file)
@@ -1185,8 +1185,8 @@ static int rx_pkt(struct atm_dev *dev)
 
        /* Build the DLE structure */  
        wr_ptr = iadev->rx_dle_q.write;  
-       wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
-               len, PCI_DMA_FROMDEVICE);
+       wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
+                                             len, DMA_FROM_DEVICE);
        wr_ptr->local_pkt_addr = buf_addr;  
        wr_ptr->bytes = len;    /* We don't know this do we ?? */  
        wr_ptr->mode = DMA_INT_ENABLE;  
@@ -1306,8 +1306,8 @@ static void rx_dle_intr(struct atm_dev *dev)
           u_short length;
           struct ia_vcc *ia_vcc;
 
-         pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
-               len, PCI_DMA_FROMDEVICE);
+         dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
+                          len, DMA_FROM_DEVICE);
           /* no VCC related housekeeping done as yet. lets see */  
           vcc = ATM_SKB(skb)->vcc;
          if (!vcc) {
@@ -1430,8 +1430,8 @@ static int rx_init(struct atm_dev *dev)
   //    spin_lock_init(&iadev->rx_lock); 
   
        /* Allocate 4k bytes - more aligned than needed (4k boundary) */
-       dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
-                                       &iadev->rx_dle_dma);  
+       dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
+                                     &iadev->rx_dle_dma, GFP_KERNEL);
        if (!dle_addr)  {  
                printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
                goto err_out;
@@ -1631,8 +1631,8 @@ static int rx_init(struct atm_dev *dev)
        return 0;  
 
 err_free_dle:
-       pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
-                           iadev->rx_dle_dma);  
+       dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
+                         iadev->rx_dle_dma);
 err_out:
        return -ENOMEM;
 }  
@@ -1702,8 +1702,8 @@ static void tx_dle_intr(struct atm_dev *dev)
 
            /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
            if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
-               pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
-                                PCI_DMA_TODEVICE);
+               dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
+                                DMA_TO_DEVICE);
            }
             vcc = ATM_SKB(skb)->vcc;
             if (!vcc) {
@@ -1917,8 +1917,8 @@ static int tx_init(struct atm_dev *dev)
                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
 
        /* Allocate 4k (boundary aligned) bytes */
-       dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
-                                       &iadev->tx_dle_dma);  
+       dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
+                                     &iadev->tx_dle_dma, GFP_KERNEL);
        if (!dle_addr)  {
                printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
                goto err_out;
@@ -1989,8 +1989,10 @@ static int tx_init(struct atm_dev *dev)
                goto err_free_tx_bufs;
             }
            iadev->tx_buf[i].cpcs = cpcs;
-           iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
-               cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
+           iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
+                                                      cpcs,
+                                                      sizeof(*cpcs),
+                                                      DMA_TO_DEVICE);
         }
         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
@@ -2198,14 +2200,14 @@ err_free_tx_bufs:
        while (--i >= 0) {
                struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
 
-               pci_unmap_single(iadev->pci, desc->dma_addr,
-                       sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
+               dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
+                                sizeof(*desc->cpcs), DMA_TO_DEVICE);
                kfree(desc->cpcs);
        }
        kfree(iadev->tx_buf);
 err_free_dle:
-       pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
-                           iadev->tx_dle_dma);  
+       dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
+                         iadev->tx_dle_dma);
 err_out:
        return -ENOMEM;
 }   
@@ -2476,20 +2478,20 @@ static void ia_free_tx(IADEV *iadev)
        for (i = 0; i < iadev->num_tx_desc; i++) {
                struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
 
-               pci_unmap_single(iadev->pci, desc->dma_addr,
-                       sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
+               dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
+                                sizeof(*desc->cpcs), DMA_TO_DEVICE);
                kfree(desc->cpcs);
        }
        kfree(iadev->tx_buf);
-       pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
-                           iadev->tx_dle_dma);  
+       dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
+                         iadev->tx_dle_dma);
 }
 
 static void ia_free_rx(IADEV *iadev)
 {
        kfree(iadev->rx_open);
-       pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
-                         iadev->rx_dle_dma);  
+       dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
+                         iadev->rx_dle_dma);
 }
 
 static int ia_start(struct atm_dev *dev)
@@ -3009,8 +3011,8 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
        /* Build the DLE structure */  
        wr_ptr = iadev->tx_dle_q.write;  
        memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
-       wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
-               skb->len, PCI_DMA_TODEVICE);
+       wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
+                                             skb->len, DMA_TO_DEVICE);
        wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
                                                   buf_desc_ptr->buf_start_lo;  
        /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
index 93eaf8d944926a706c3fe5b5bf1baac6a49c5c7c..ce43ae3e87b3513245cf5b322d5a4b4984ec9dfa 100644 (file)
@@ -346,7 +346,8 @@ static void lanai_buf_allocate(struct lanai_buffer *buf,
                 * everything, but the way the lanai uses DMA memory would
                 * make that a terrific pain.  This is much simpler.
                 */
-               buf->start = pci_alloc_consistent(pci, size, &buf->dmaaddr);
+               buf->start = dma_alloc_coherent(&pci->dev,
+                                               size, &buf->dmaaddr, GFP_KERNEL);
                if (buf->start != NULL) {       /* Success */
                        /* Lanai requires 256-byte alignment of DMA bufs */
                        APRINTK((buf->dmaaddr & ~0xFFFFFF00) == 0,
@@ -372,8 +373,8 @@ static void lanai_buf_deallocate(struct lanai_buffer *buf,
        struct pci_dev *pci)
 {
        if (buf->start != NULL) {
-               pci_free_consistent(pci, lanai_buf_size(buf),
-                   buf->start, buf->dmaaddr);
+               dma_free_coherent(&pci->dev, lanai_buf_size(buf),
+                                 buf->start, buf->dmaaddr);
                buf->start = buf->end = buf->ptr = NULL;
        }
 }
@@ -681,15 +682,6 @@ static inline int aal5_size(int size)
        return cells * 48;
 }
 
-/* How many bytes can we send if we have "space" space, assuming we have
- * to send full cells
- */
-static inline int aal5_spacefor(int space)
-{
-       int cells = space / 48;
-       return cells * 48;
-}
-
 /* -------------------- FREE AN ATM SKB: */
 
 static inline void lanai_free_skb(struct atm_vcc *atmvcc, struct sk_buff *skb)
@@ -1953,12 +1945,7 @@ static int lanai_pci_start(struct lanai_dev *lanai)
                return -ENXIO;
        }
        pci_set_master(pci);
-       if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) != 0) {
-               printk(KERN_WARNING DEV_LABEL
-                   "(itf %d): No suitable DMA available.\n", lanai->number);
-               return -EBUSY;
-       }
-       if (pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) != 0) {
+       if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32)) != 0) {
                printk(KERN_WARNING DEV_LABEL
                    "(itf %d): No suitable DMA available.\n", lanai->number);
                return -EBUSY;
index 9988ac98b6d83ee58fa2774f1c4ced4559dcee74..b7e1cc0a97c86aac6c47cb5d459f63313a9beb45 100644 (file)
@@ -252,10 +252,10 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
                        free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
        }
        idr_destroy(&card->idr);
-       pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
-                           card->rsq.org, card->rsq.dma);
-       pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
-                           card->tsq.org, card->tsq.dma);
+       dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+                         card->rsq.org, card->rsq.dma);
+       dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+                         card->tsq.org, card->tsq.dma);
        free_irq(card->pcidev->irq, card);
        iounmap(card->membase);
        kfree(card);
@@ -370,8 +370,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
                ns_init_card_error(card, error);
                return error;
        }
-        if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) ||
-           (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) {
+        if (dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)) != 0) {
                 printk(KERN_WARNING
                       "nicstar%d: No suitable DMA available.\n", i);
                error = 2;
@@ -535,9 +534,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
        writel(0x00000000, card->membase + VPM);
 
        /* Initialize TSQ */
-       card->tsq.org = pci_alloc_consistent(card->pcidev,
-                                            NS_TSQSIZE + NS_TSQ_ALIGNMENT,
-                                            &card->tsq.dma);
+       card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
+                                          NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+                                          &card->tsq.dma, GFP_KERNEL);
        if (card->tsq.org == NULL) {
                printk("nicstar%d: can't allocate TSQ.\n", i);
                error = 10;
@@ -554,9 +553,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
        PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
 
        /* Initialize RSQ */
-       card->rsq.org = pci_alloc_consistent(card->pcidev,
-                                            NS_RSQSIZE + NS_RSQ_ALIGNMENT,
-                                            &card->rsq.dma);
+       card->rsq.org = dma_alloc_coherent(&card->pcidev->dev,
+                                          NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+                                          &card->rsq.dma, GFP_KERNEL);
        if (card->rsq.org == NULL) {
                printk("nicstar%d: can't allocate RSQ.\n", i);
                error = 11;
@@ -874,7 +873,8 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
        scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
        if (!scq)
                return NULL;
-        scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
+        scq->org = dma_alloc_coherent(&card->pcidev->dev,
+                                     2 * size,  &scq->dma, GFP_KERNEL);
        if (!scq->org) {
                kfree(scq);
                return NULL;
@@ -936,10 +936,10 @@ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
                        }
        }
        kfree(scq->skb);
-       pci_free_consistent(card->pcidev,
-                           2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
-                                VBR_SCQSIZE : CBR_SCQSIZE),
-                           scq->org, scq->dma);
+       dma_free_coherent(&card->pcidev->dev,
+                         2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
+                              VBR_SCQSIZE : CBR_SCQSIZE),
+                         scq->org, scq->dma);
        kfree(scq);
 }
 
@@ -957,11 +957,11 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
        handle2 = NULL;
        addr2 = 0;
        handle1 = skb;
-       addr1 = pci_map_single(card->pcidev,
+       addr1 = dma_map_single(&card->pcidev->dev,
                               skb->data,
                               (NS_PRV_BUFTYPE(skb) == BUF_SM
                                ? NS_SMSKBSIZE : NS_LGSKBSIZE),
-                              PCI_DMA_TODEVICE);
+                              DMA_TO_DEVICE);
        NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
 
 #ifdef GENERAL_DEBUG
@@ -1670,8 +1670,8 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
 
        ATM_SKB(skb)->vcc = vcc;
 
-       NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
-                                        skb->len, PCI_DMA_TODEVICE);
+       NS_PRV_DMA(skb) = dma_map_single(&card->pcidev->dev, skb->data,
+                                        skb->len, DMA_TO_DEVICE);
 
        if (vcc->qos.aal == ATM_AAL5) {
                buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */
@@ -1930,10 +1930,10 @@ static void drain_scq(ns_dev * card, scq_info * scq, int pos)
                XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
                        card->index, skb, i);
                if (skb != NULL) {
-                       pci_unmap_single(card->pcidev,
+                       dma_unmap_single(&card->pcidev->dev,
                                         NS_PRV_DMA(skb),
                                         skb->len,
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
                        vcc = ATM_SKB(skb)->vcc;
                        if (vcc && vcc->pop != NULL) {
                                vcc->pop(vcc, skb);
@@ -1992,16 +1992,16 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                return;
        }
        idr_remove(&card->idr, id);
-        pci_dma_sync_single_for_cpu(card->pcidev,
-                                   NS_PRV_DMA(skb),
-                                   (NS_PRV_BUFTYPE(skb) == BUF_SM
-                                    ? NS_SMSKBSIZE : NS_LGSKBSIZE),
-                                   PCI_DMA_FROMDEVICE);
-       pci_unmap_single(card->pcidev,
+       dma_sync_single_for_cpu(&card->pcidev->dev,
+                               NS_PRV_DMA(skb),
+                               (NS_PRV_BUFTYPE(skb) == BUF_SM
+                                ? NS_SMSKBSIZE : NS_LGSKBSIZE),
+                               DMA_FROM_DEVICE);
+       dma_unmap_single(&card->pcidev->dev,
                         NS_PRV_DMA(skb),
                         (NS_PRV_BUFTYPE(skb) == BUF_SM
                          ? NS_SMSKBSIZE : NS_LGSKBSIZE),
-                        PCI_DMA_FROMDEVICE);
+                        DMA_FROM_DEVICE);
        vpi = ns_rsqe_vpi(rsqe);
        vci = ns_rsqe_vci(rsqe);
        if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
index 21b0bc6a9c969ea677630a827f69c45545a9e78a..74e18b0a6d8945ac6df8537d394729354769e6da 100644 (file)
@@ -785,8 +785,8 @@ static void solos_bh(unsigned long card_arg)
                                skb = card->rx_skb[port];
                                card->rx_skb[port] = NULL;
 
-                               pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
-                                                RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
+                               dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
+                                                RX_DMA_SIZE, DMA_FROM_DEVICE);
 
                                header = (void *)skb->data;
                                size = le16_to_cpu(header->size);
@@ -872,8 +872,8 @@ static void solos_bh(unsigned long card_arg)
                        struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC);
                        if (skb) {
                                SKB_CB(skb)->dma_addr =
-                                       pci_map_single(card->dev, skb->data,
-                                                      RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
+                                       dma_map_single(&card->dev->dev, skb->data,
+                                                      RX_DMA_SIZE, DMA_FROM_DEVICE);
                                iowrite32(SKB_CB(skb)->dma_addr,
                                          card->config_regs + RX_DMA_ADDR(port));
                                card->rx_skb[port] = skb;
@@ -1069,8 +1069,8 @@ static uint32_t fpga_tx(struct solos_card *card)
                if (tx_pending & 1) {
                        struct sk_buff *oldskb = card->tx_skb[port];
                        if (oldskb) {
-                               pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
-                                                oldskb->len, PCI_DMA_TODEVICE);
+                               dma_unmap_single(&card->dev->dev, SKB_CB(oldskb)->dma_addr,
+                                                oldskb->len, DMA_TO_DEVICE);
                                card->tx_skb[port] = NULL;
                        }
                        spin_lock(&card->tx_queue_lock);
@@ -1089,8 +1089,8 @@ static uint32_t fpga_tx(struct solos_card *card)
                                        data = card->dma_bounce + (BUF_SIZE * port);
                                        memcpy(data, skb->data, skb->len);
                                }
-                               SKB_CB(skb)->dma_addr = pci_map_single(card->dev, data,
-                                                                      skb->len, PCI_DMA_TODEVICE);
+                               SKB_CB(skb)->dma_addr = dma_map_single(&card->dev->dev, data,
+                                                                      skb->len, DMA_TO_DEVICE);
                                card->tx_skb[port] = skb;
                                iowrite32(SKB_CB(skb)->dma_addr,
                                          card->config_regs + TX_DMA_ADDR(port));
@@ -1210,7 +1210,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
                goto out;
        }
 
-       err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+       err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
        if (err) {
                dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n");
                goto out;
@@ -1411,14 +1411,14 @@ static void atm_remove(struct solos_card *card)
 
                        skb = card->rx_skb[i];
                        if (skb) {
-                               pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
-                                                RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
+                               dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
+                                                RX_DMA_SIZE, DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
                        }
                        skb = card->tx_skb[i];
                        if (skb) {
-                               pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
-                                                skb->len, PCI_DMA_TODEVICE);
+                               dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
+                                                skb->len, DMA_TO_DEVICE);
                                dev_kfree_skb(skb);
                        }
                        while ((skb = skb_dequeue(&card->tx_queue[i])))
index 969c3c29000c3622094068d940b854d251393673..cecfb943762f6a1be4c7973e64583767a54edce6 100644 (file)
@@ -1306,19 +1306,20 @@ static int zatm_start(struct atm_dev *dev)
 
                if (!mbx_entries[i])
                        continue;
-               mbx = pci_alloc_consistent(pdev, 2*MBX_SIZE(i), &mbx_dma);
+               mbx = dma_alloc_coherent(&pdev->dev,
+                                        2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL);
                if (!mbx) {
                        error = -ENOMEM;
                        goto out;
                }
                /*
-                * Alignment provided by pci_alloc_consistent() isn't enough
+                * Alignment provided by dma_alloc_coherent() isn't enough
                 * for this device.
                 */
                if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
                        printk(KERN_ERR DEV_LABEL "(itf %d): system "
                               "bus incompatible with driver\n", dev->number);
-                       pci_free_consistent(pdev, 2*MBX_SIZE(i), mbx, mbx_dma);
+                       dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma);
                        error = -ENODEV;
                        goto out;
                }
@@ -1354,9 +1355,9 @@ out_tx:
        kfree(zatm_dev->tx_map);
 out:
        while (i-- > 0) {
-               pci_free_consistent(pdev, 2*MBX_SIZE(i), 
-                                   (void *)zatm_dev->mbx_start[i],
-                                   zatm_dev->mbx_dma[i]);
+               dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i),
+                                 (void *)zatm_dev->mbx_start[i],
+                                 zatm_dev->mbx_dma[i]);
        }
        free_irq(zatm_dev->irq, dev);
        goto done;
@@ -1608,6 +1609,10 @@ static int zatm_init_one(struct pci_dev *pci_dev,
        if (ret < 0)
                goto out_disable;
 
+       ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+       if (ret < 0)
+               goto out_disable;
+
        zatm_dev->pci_dev = pci_dev;
        dev->dev_data = zatm_dev;
        zatm_dev->copper = (int)ent->driver_data;
index ae9f615382f6173c9ad6377c4bc4275403575fdf..aa2224aa7caa34d5854aebfb7ceaf4cebd29eccc 100644 (file)
@@ -530,7 +530,7 @@ static int null_add_dev(void)
                        goto out_cleanup_queues;
 
                nullb->q = blk_mq_init_queue(&nullb->tag_set);
-               if (!nullb->q) {
+               if (IS_ERR(nullb->q)) {
                        rv = -ENOMEM;
                        goto out_cleanup_tags;
                }
index b1d5d87973157b4c6e4a70b757519c37b3460201..d826bf3e62c8621e8572ca9eabb7951d42c33eb7 100644 (file)
@@ -106,7 +106,7 @@ struct nvme_queue {
        dma_addr_t cq_dma_addr;
        u32 __iomem *q_db;
        u16 q_depth;
-       u16 cq_vector;
+       s16 cq_vector;
        u16 sq_head;
        u16 sq_tail;
        u16 cq_head;
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
        cmd->fn = handler;
        cmd->ctx = ctx;
        cmd->aborted = 0;
+       blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
 }
 
 /* Special values must be less than 0x1000 */
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
        if (unlikely(status)) {
                if (!(status & NVME_SC_DNR || blk_noretry_request(req))
                    && (jiffies - req->start_time) < req->timeout) {
+                       unsigned long flags;
+
                        blk_mq_requeue_request(req);
-                       blk_mq_kick_requeue_list(req->q);
+                       spin_lock_irqsave(req->q->queue_lock, flags);
+                       if (!blk_queue_stopped(req->q))
+                               blk_mq_kick_requeue_list(req->q);
+                       spin_unlock_irqrestore(req->q->queue_lock, flags);
                        return;
                }
                req->errors = nvme_error_status(status);
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                }
        }
 
-       blk_mq_start_request(req);
-
        nvme_set_info(cmd, iod, req_completion);
        spin_lock_irq(&nvmeq->q_lock);
        if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
+       req->cmd_flags |= REQ_NO_TIMEOUT;
        cmd_info = blk_mq_rq_to_pdu(req);
        nvme_set_info(cmd_info, req, async_req_completion);
 
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
        struct nvme_command cmd;
 
        if (!nvmeq->qid || cmd_rq->aborted) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dev_list_lock, flags);
                if (work_busy(&dev->reset_work))
-                       return;
+                       goto out;
                list_del_init(&dev->node);
                dev_warn(&dev->pci_dev->dev,
                        "I/O %d QID %d timeout, reset controller\n",
                                                        req->tag, nvmeq->qid);
                dev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &dev->reset_work);
+ out:
+               spin_unlock_irqrestore(&dev_list_lock, flags);
                return;
        }
 
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
        void *ctx;
        nvme_completion_fn fn;
        struct nvme_cmd_info *cmd;
-       static struct nvme_completion cqe = {
-               .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
-       };
+       struct nvme_completion cqe;
+
+       if (!blk_mq_request_started(req))
+               return;
 
        cmd = blk_mq_rq_to_pdu(req);
 
        if (cmd->ctx == CMD_CTX_CANCELLED)
                return;
 
+       if (blk_queue_dying(req->q))
+               cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+       else
+               cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
+
+
        dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
                                                req->tag, nvmeq->qid);
        ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd->nvmeq;
 
-       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
-                                                       nvmeq->qid);
-       if (nvmeq->dev->initialized)
-               nvme_abort_req(req);
-
        /*
         * The aborted req will be completed on receiving the abort req.
         * We enable the timer again. If hit twice, it'll cause a device reset,
         * as the device then is in a faulty state.
         */
-       return BLK_EH_RESET_TIMER;
+       int ret = BLK_EH_RESET_TIMER;
+
+       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+                                                       nvmeq->qid);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       if (!nvmeq->dev->initialized) {
+               /*
+                * Force cancelled command frees the request, which requires we
+                * return BLK_EH_NOT_HANDLED.
+                */
+               nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+               ret = BLK_EH_NOT_HANDLED;
+       } else
+               nvme_abort_req(req);
+       spin_unlock_irq(&nvmeq->q_lock);
+
+       return ret;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
  */
 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
-       int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+       int vector;
 
        spin_lock_irq(&nvmeq->q_lock);
+       if (nvmeq->cq_vector == -1) {
+               spin_unlock_irq(&nvmeq->q_lock);
+               return 1;
+       }
+       vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
        nvmeq->dev->online_queues--;
+       nvmeq->cq_vector = -1;
        spin_unlock_irq(&nvmeq->q_lock);
 
        irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
                adapter_delete_sq(dev, qid);
                adapter_delete_cq(dev, qid);
        }
+       if (!qid && dev->admin_q)
+               blk_mq_freeze_queue_start(dev->admin_q);
        nvme_clear_queue(nvmeq);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-                                                       int depth, int vector)
+                                                       int depth)
 {
        struct device *dmadev = &dev->pci_dev->dev;
        struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
-       nvmeq->cq_vector = vector;
        nvmeq->qid = qid;
        dev->queue_count++;
        dev->queues[qid] = nvmeq;
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        struct nvme_dev *dev = nvmeq->dev;
        int result;
 
+       nvmeq->cq_vector = qid - 1;
        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
                return result;
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
        .timeout        = nvme_timeout,
 };
 
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+       if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
+               blk_cleanup_queue(dev->admin_q);
+               blk_mq_free_tag_set(&dev->admin_tagset);
+       }
+}
+
 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 {
        if (!dev->admin_q) {
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
                        return -ENOMEM;
 
                dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
-               if (!dev->admin_q) {
+               if (IS_ERR(dev->admin_q)) {
                        blk_mq_free_tag_set(&dev->admin_tagset);
                        return -ENOMEM;
                }
-       }
+               if (!blk_get_queue(dev->admin_q)) {
+                       nvme_dev_remove_admin(dev);
+                       return -ENODEV;
+               }
+       } else
+               blk_mq_unfreeze_queue(dev->admin_q);
 
        return 0;
 }
 
-static void nvme_free_admin_tags(struct nvme_dev *dev)
-{
-       if (dev->admin_q)
-               blk_mq_free_tag_set(&dev->admin_tagset);
-}
-
 static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
        int result;
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
        nvmeq = dev->queues[0];
        if (!nvmeq) {
-               nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+               nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
                if (!nvmeq)
                        return -ENOMEM;
        }
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        if (result)
                goto free_nvmeq;
 
-       result = nvme_alloc_admin_tags(dev);
-       if (result)
-               goto free_nvmeq;
-
+       nvmeq->cq_vector = 0;
        result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
        if (result)
-               goto free_tags;
+               goto free_nvmeq;
 
        return result;
 
- free_tags:
-       nvme_free_admin_tags(dev);
  free_nvmeq:
        nvme_free_queues(dev, 0);
        return result;
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
        unsigned i;
 
        for (i = dev->queue_count; i <= dev->max_qid; i++)
-               if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+               if (!nvme_alloc_queue(dev, i, dev->q_depth))
                        break;
 
        for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
                        break;
                if (!schedule_timeout(ADMIN_TIMEOUT) ||
                                        fatal_signal_pending(current)) {
+                       /*
+                        * Disable the controller first since we can't trust it
+                        * at this point, but leave the admin queue enabled
+                        * until all queue deletion requests are flushed.
+                        * FIXME: This may take a while if there are more h/w
+                        * queues than admin tags.
+                        */
                        set_current_state(TASK_RUNNING);
-
                        nvme_disable_ctrl(dev, readq(&dev->bar->cap));
-                       nvme_disable_queue(dev, 0);
-
-                       send_sig(SIGKILL, dq->worker->task, 1);
+                       nvme_clear_queue(dev->queues[0]);
                        flush_kthread_worker(dq->worker);
+                       nvme_disable_queue(dev, 0);
                        return;
                }
        }
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
 {
        struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
                                                        cmdinfo.work);
-       allow_signal(SIGKILL);
        if (nvme_delete_sq(nvmeq))
                nvme_del_queue_end(nvmeq);
 }
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
                kthread_stop(tmp);
 }
 
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns;
+
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               blk_mq_freeze_queue_start(ns->queue);
+
+               spin_lock(ns->queue->queue_lock);
+               queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+               spin_unlock(ns->queue->queue_lock);
+
+               blk_mq_cancel_requeue_work(ns->queue);
+               blk_mq_stop_hw_queues(ns->queue);
+       }
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns;
+
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+               blk_mq_unfreeze_queue(ns->queue);
+               blk_mq_start_stopped_hw_queues(ns->queue, true);
+               blk_mq_kick_requeue_list(ns->queue);
+       }
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
        int i;
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        dev->initialized = 0;
        nvme_dev_list_remove(dev);
 
-       if (dev->bar)
+       if (dev->bar) {
+               nvme_freeze_queues(dev);
                csts = readl(&dev->bar->csts);
+       }
        if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
                for (i = dev->queue_count - 1; i >= 0; i--) {
                        struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        nvme_dev_unmap(dev);
 }
 
-static void nvme_dev_remove_admin(struct nvme_dev *dev)
-{
-       if (dev->admin_q && !blk_queue_dying(dev->admin_q))
-               blk_cleanup_queue(dev->admin_q);
-}
-
 static void nvme_dev_remove(struct nvme_dev *dev)
 {
        struct nvme_ns *ns;
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
        list_for_each_entry(ns, &dev->namespaces, list) {
                if (ns->disk->flags & GENHD_FL_UP)
                        del_gendisk(ns->disk);
-               if (!blk_queue_dying(ns->queue))
+               if (!blk_queue_dying(ns->queue)) {
+                       blk_mq_abort_requeue_list(ns->queue);
                        blk_cleanup_queue(ns->queue);
+               }
        }
 }
 
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
        nvme_free_namespaces(dev);
        nvme_release_instance(dev);
        blk_mq_free_tag_set(&dev->tagset);
+       blk_put_queue(dev->admin_q);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
        }
 
        nvme_init_queue(dev->queues[0], 0);
+       result = nvme_alloc_admin_tags(dev);
+       if (result)
+               goto disable;
 
        result = nvme_setup_io_queues(dev);
        if (result)
-               goto disable;
+               goto free_tags;
 
        nvme_set_irq_hints(dev);
 
        return result;
 
+ free_tags:
+       nvme_dev_remove_admin(dev);
  disable:
        nvme_disable_queue(dev, 0);
        nvme_dev_list_remove(dev);
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
                dev->reset_workfn = nvme_remove_disks;
                queue_work(nvme_workq, &dev->reset_work);
                spin_unlock(&dev_list_lock);
+       } else {
+               nvme_unfreeze_queues(dev);
+               nvme_set_irq_hints(dev);
        }
        dev->initialized = 1;
        return 0;
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
        flush_work(&dev->reset_work);
        misc_deregister(&dev->miscdev);
-       nvme_dev_remove(dev);
        nvme_dev_shutdown(dev);
+       nvme_dev_remove(dev);
        nvme_dev_remove_admin(dev);
        nvme_free_queues(dev, 0);
-       nvme_free_admin_tags(dev);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
 }
index 3ec85dfce12496dd64a8ad2c37984ad61f489e25..8a86b62466f7ce72b54853b283e03fd495df8083 100644 (file)
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
  * If an image has a non-zero parent overlap, get a reference to its
  * parent.
  *
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened.  We
- * drop it again if there is no overlap.
- *
  * Returns true if the rbd device has a parent with a non-zero
  * overlap and a reference for it was successfully taken, or
  * false otherwise.
  */
 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
 {
-       int counter;
+       int counter = 0;
 
        if (!rbd_dev->parent_spec)
                return false;
 
-       counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
-       if (counter > 0 && rbd_dev->parent_overlap)
-               return true;
-
-       /* Image was flattened, but parent is not yet torn down */
+       down_read(&rbd_dev->header_rwsem);
+       if (rbd_dev->parent_overlap)
+               counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+       up_read(&rbd_dev->header_rwsem);
 
        if (counter < 0)
                rbd_warn(rbd_dev, "parent reference overflow");
 
-       return false;
+       return counter > 0;
 }
 
 /*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                 */
                if (rbd_dev->parent_overlap) {
                        rbd_dev->parent_overlap = 0;
-                       smp_mb();
                        rbd_dev_parent_put(rbd_dev);
                        pr_info("%s: clone image has been flattened\n",
                                rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
         * treat it specially.
         */
        rbd_dev->parent_overlap = overlap;
-       smp_mb();
        if (!overlap) {
 
                /* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
        struct rbd_image_header *header;
 
-       /* Drop parent reference unless it's already been done (or none) */
-
-       if (rbd_dev->parent_overlap)
-               rbd_dev_parent_put(rbd_dev);
+       rbd_dev_parent_put(rbd_dev);
 
        /* Free dynamic fields from the header, then zero it out */
 
index 7ef7c098708fc4e482181724d574555bbb9db6d7..cdfbd21e35975178fa0c4cece78a354ef1d53007 100644 (file)
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
                goto out_put_disk;
 
        q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
-       if (!q) {
+       if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
index 1ee27ac18de052e660fcafc148b6dbfd76a2499f..de4c8499cbac958f0100f0004e38884839281729 100644 (file)
@@ -108,6 +108,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x13d3, 0x3393) },
        { USB_DEVICE(0x13d3, 0x3402) },
        { USB_DEVICE(0x13d3, 0x3408) },
+       { USB_DEVICE(0x13d3, 0x3423) },
        { USB_DEVICE(0x13d3, 0x3432) },
 
        /* Atheros AR5BBU12 with sflash firmware */
@@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU22 with sflash firmware */
@@ -174,6 +176,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
 #define USB_REQ_DFU_DNLOAD     1
 #define BULK_SIZE              4096
 #define FW_HDR_SIZE            20
+#define TIMEGAP_USEC_MIN       50
+#define TIMEGAP_USEC_MAX       100
 
 static int ath3k_load_firmware(struct usb_device *udev,
                                const struct firmware *firmware)
@@ -205,6 +209,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
        pipe = usb_sndbulkpipe(udev, 0x02);
 
        while (count) {
+               /* workaround the compatibility issue with xHCI controller*/
+               usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
                size = min_t(uint, count, BULK_SIZE);
                memcpy(send_buf, firmware->data + sent, size);
 
@@ -302,6 +309,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
        pipe = usb_sndbulkpipe(udev, 0x02);
 
        while (count) {
+               /* workaround the compatibility issue with xHCI controller*/
+               usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
                size = min_t(uint, count, BULK_SIZE);
                memcpy(send_buf, firmware->data + sent, size);
 
index 330f8f84928d4d883be8b246eb78b90bad7f6159..e75f8ee2512cb08ea17579c6d853780a978b3ada 100644 (file)
@@ -28,9 +28,9 @@
 #define BTM_UPLD_SIZE                  2312
 
 /* Time to wait until Host Sleep state change in millisecond */
-#define WAIT_UNTIL_HS_STATE_CHANGED    5000
+#define WAIT_UNTIL_HS_STATE_CHANGED    msecs_to_jiffies(5000)
 /* Time to wait for command response in millisecond */
-#define WAIT_UNTIL_CMD_RESP            5000
+#define WAIT_UNTIL_CMD_RESP            msecs_to_jiffies(5000)
 
 enum rdwr_status {
        RDWR_STATUS_SUCCESS = 0,
@@ -104,6 +104,7 @@ struct btmrvl_private {
 #ifdef CONFIG_DEBUG_FS
        void *debugfs_data;
 #endif
+       bool surprise_removed;
 };
 
 #define MRVL_VENDOR_PKT                        0xFE
index 30939c993d94cc7e832063501f1bca4a3dd4d530..413597789c61d61686737ceb0b7c8501427c3b42 100644 (file)
@@ -178,6 +178,11 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
        struct sk_buff *skb;
        struct hci_command_hdr *hdr;
 
+       if (priv->surprise_removed) {
+               BT_ERR("Card is removed");
+               return -EFAULT;
+       }
+
        skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
        if (skb == NULL) {
                BT_ERR("No free skb");
@@ -202,10 +207,14 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
        wake_up_interruptible(&priv->main_thread.wait_q);
 
        if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
-                               priv->adapter->cmd_complete,
-                               msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
+                                             priv->adapter->cmd_complete ||
+                                             priv->surprise_removed,
+                                             WAIT_UNTIL_CMD_RESP))
                return -ETIMEDOUT;
 
+       if (priv->surprise_removed)
+               return -EFAULT;
+
        return 0;
 }
 
@@ -287,9 +296,10 @@ int btmrvl_enable_hs(struct btmrvl_private *priv)
        }
 
        ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q,
-                                              adapter->hs_state,
-                       msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED));
-       if (ret < 0) {
+                                              adapter->hs_state ||
+                                              priv->surprise_removed,
+                                              WAIT_UNTIL_HS_STATE_CHANGED);
+       if (ret < 0 || priv->surprise_removed) {
                BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d",
                       ret, adapter->hs_state, adapter->ps_state,
                       adapter->wakeup_tries);
@@ -538,8 +548,11 @@ static int btmrvl_check_device_tree(struct btmrvl_private *priv)
 static int btmrvl_setup(struct hci_dev *hdev)
 {
        struct btmrvl_private *priv = hci_get_drvdata(hdev);
+       int ret;
 
-       btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+       ret = btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+       if (ret)
+               return ret;
 
        priv->btmrvl_dev.gpio_gap = 0xffff;
 
@@ -597,7 +610,7 @@ static int btmrvl_service_main_thread(void *data)
                add_wait_queue(&thread->wait_q, &wait);
 
                set_current_state(TASK_INTERRUPTIBLE);
-               if (kthread_should_stop()) {
+               if (kthread_should_stop() || priv->surprise_removed) {
                        BT_DBG("main_thread: break from main thread");
                        break;
                }
@@ -616,6 +629,11 @@ static int btmrvl_service_main_thread(void *data)
 
                BT_DBG("main_thread woke up");
 
+               if (kthread_should_stop() || priv->surprise_removed) {
+                       BT_DBG("main_thread: break from main thread");
+                       break;
+               }
+
                spin_lock_irqsave(&priv->driver_lock, flags);
                if (adapter->int_count) {
                        adapter->int_count = 0;
index 0057c0b7a7761e4053e1183f467502b37dbc93a1..01d6da577eeb0713127f57c264e6345b5055781d 100644 (file)
@@ -573,7 +573,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
                offset += txlen;
        } while (true);
 
-       BT_DBG("FW download over, size %d bytes", offset);
+       BT_INFO("FW download over, size %d bytes", offset);
 
        ret = 0;
 
@@ -798,6 +798,9 @@ static void btmrvl_sdio_interrupt(struct sdio_func *func)
 
        priv = card->priv;
 
+       if (priv->surprise_removed)
+               return;
+
        if (card->reg->int_read_to_clear)
                ret = btmrvl_sdio_read_to_clear(card, &ireg);
        else
@@ -1466,6 +1469,7 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
                                btmrvl_sdio_disable_host_int(card);
                        }
                        BT_DBG("unregester dev");
+                       card->priv->surprise_removed = true;
                        btmrvl_sdio_unregister_dev(card);
                        btmrvl_remove_card(card->priv);
                }
index f051a93c6cad150e5c346e12b7507823c3fb0926..b876888811432a9bad46ab73a32ca40b04ed2ce4 100644 (file)
@@ -28,7 +28,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-#define VERSION "0.6"
+#define VERSION "0.7"
 
 static bool disable_scofix;
 static bool force_scofix;
@@ -49,12 +49,17 @@ static struct usb_driver btusb_driver;
 #define BTUSB_INTEL_BOOT       0x200
 #define BTUSB_BCM_PATCHRAM     0x400
 #define BTUSB_MARVELL          0x800
-#define BTUSB_AVM              0x1000
+#define BTUSB_SWAVE            0x1000
+#define BTUSB_INTEL_NEW                0x2000
+#define BTUSB_AMP              0x4000
 
 static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
        { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
 
+       /* Generic Bluetooth AMP device */
+       { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
+
        /* Apple-specific (Broadcom) devices */
        { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
 
@@ -86,7 +91,7 @@ static const struct usb_device_id btusb_table[] = {
        { USB_DEVICE(0x05ac, 0x8281) },
 
        /* AVM BlueFRITZ! USB v2.0 */
-       { USB_DEVICE(0x057c, 0x3800), .driver_info = BTUSB_AVM },
+       { USB_DEVICE(0x057c, 0x3800), .driver_info = BTUSB_SWAVE },
 
        /* Bluetooth Ultraport Module from IBM */
        { USB_DEVICE(0x04bf, 0x030a) },
@@ -110,16 +115,24 @@ static const struct usb_device_id btusb_table[] = {
        { USB_DEVICE(0x13d3, 0x3404),
          .driver_info = BTUSB_BCM_PATCHRAM },
 
+       /* Broadcom BCM20702B0 (Dynex/Insignia) */
+       { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
+
        /* Foxconn - Hon Hai */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
          .driver_info = BTUSB_BCM_PATCHRAM },
 
+       /* Lite-On Technology - Broadcom based */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x04ca, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_PATCHRAM },
+
        /* Broadcom devices with vendor specific id */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
          .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* ASUSTek Computer - Broadcom based */
-       { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_PATCHRAM },
 
        /* Belkin F8065bf - Broadcom based */
        { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
@@ -189,6 +202,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU12 with sflash firmware */
@@ -238,6 +252,9 @@ static const struct usb_device_id blacklist_table[] = {
        /* CONWISE Technology based adapters with buggy SCO support */
        { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
 
+       /* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
+       { USB_DEVICE(0x1300, 0x0001), .driver_info = BTUSB_SWAVE },
+
        /* Digianswer devices */
        { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
        { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE },
@@ -250,13 +267,18 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x16d3, 0x0002),
          .driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC },
 
-       /* Intel Bluetooth device */
+       /* Marvell Bluetooth devices */
+       { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
+       { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+
+       /* Intel Bluetooth devices */
        { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
        { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
+       { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
 
-       /* Marvell device */
-       { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
-       { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+       /* Other Intel Bluetooth devices */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
+         .driver_info = BTUSB_IGNORE },
 
        { }     /* Terminating entry */
 };
@@ -268,6 +290,11 @@ static const struct usb_device_id blacklist_table[] = {
 #define BTUSB_ISOC_RUNNING     2
 #define BTUSB_SUSPENDING       3
 #define BTUSB_DID_ISO_RESUME   4
+#define BTUSB_BOOTLOADER       5
+#define BTUSB_DOWNLOADING      6
+#define BTUSB_FIRMWARE_LOADED  7
+#define BTUSB_FIRMWARE_FAILED  8
+#define BTUSB_BOOTING          9
 
 struct btusb_data {
        struct hci_dev       *hdev;
@@ -301,14 +328,26 @@ struct btusb_data {
        struct usb_endpoint_descriptor *isoc_rx_ep;
 
        __u8 cmdreq_type;
+       __u8 cmdreq;
 
        unsigned int sco_num;
        int isoc_altsetting;
        int suspend_count;
 
+       int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb);
        int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
 };
 
+static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout,
+                                    unsigned mode)
+{
+       might_sleep();
+       if (!test_bit(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout,
+                                              mode, timeout);
+}
+
 static inline void btusb_free_frags(struct btusb_data *data)
 {
        unsigned long flags;
@@ -371,7 +410,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
 
                if (bt_cb(skb)->expect == 0) {
                        /* Complete frame */
-                       hci_recv_frame(data->hdev, skb);
+                       data->recv_event(data->hdev, skb);
                        skb = NULL;
                }
        }
@@ -953,7 +992,7 @@ static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb)
        }
 
        dr->bRequestType = data->cmdreq_type;
-       dr->bRequest     = 0;
+       dr->bRequest     = data->cmdreq;
        dr->wIndex       = 0;
        dr->wValue       = 0;
        dr->wLength      = __cpu_to_le16(skb->len);
@@ -1291,6 +1330,26 @@ struct intel_version {
        u8 fw_patch_num;
 } __packed;
 
+struct intel_boot_params {
+       __u8     status;
+       __u8     otp_format;
+       __u8     otp_content;
+       __u8     otp_patch;
+       __le16   dev_revid;
+       __u8     secure_boot;
+       __u8     key_from_hdr;
+       __u8     key_type;
+       __u8     otp_lock;
+       __u8     api_lock;
+       __u8     debug_lock;
+       bdaddr_t otp_bdaddr;
+       __u8     min_fw_build_nn;
+       __u8     min_fw_build_cw;
+       __u8     min_fw_build_yy;
+       __u8     limited_cce;
+       __u8     unlocked_state;
+} __packed;
+
 static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
                                                       struct intel_version *ver)
 {
@@ -1699,6 +1758,562 @@ exit_mfg_deactivate:
        return 0;
 }
 
+static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+       struct sk_buff *skb;
+       struct hci_event_hdr *hdr;
+       struct hci_ev_cmd_complete *evt;
+
+       skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+       if (!skb)
+               return -ENOMEM;
+
+       hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+       hdr->evt = HCI_EV_CMD_COMPLETE;
+       hdr->plen = sizeof(*evt) + 1;
+
+       evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+       evt->ncmd = 0x01;
+       evt->opcode = cpu_to_le16(opcode);
+
+       *skb_put(skb, 1) = 0x00;
+
+       bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+       return hci_recv_frame(hdev, skb);
+}
+
+static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer,
+                                int count)
+{
+       /* When the device is in bootloader mode, then it can send
+        * events via the bulk endpoint. These events are treated the
+        * same way as the ones received from the interrupt endpoint.
+        */
+       if (test_bit(BTUSB_BOOTLOADER, &data->flags))
+               return btusb_recv_intr(data, buffer, count);
+
+       return btusb_recv_bulk(data, buffer, count);
+}
+
+static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct btusb_data *data = hci_get_drvdata(hdev);
+
+       if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+               struct hci_event_hdr *hdr = (void *)skb->data;
+
+               /* When the firmware loading completes the device sends
+                * out a vendor specific event indicating the result of
+                * the firmware loading.
+                */
+               if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
+                   skb->data[2] == 0x06) {
+                       if (skb->data[3] != 0x00)
+                               test_bit(BTUSB_FIRMWARE_FAILED, &data->flags);
+
+                       if (test_and_clear_bit(BTUSB_DOWNLOADING,
+                                              &data->flags) &&
+                           test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) {
+                               smp_mb__after_atomic();
+                               wake_up_bit(&data->flags, BTUSB_DOWNLOADING);
+                       }
+               }
+
+               /* When switching to the operational firmware the device
+                * sends a vendor specific event indicating that the bootup
+                * completed.
+                */
+               if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
+                   skb->data[2] == 0x02) {
+                       if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) {
+                               smp_mb__after_atomic();
+                               wake_up_bit(&data->flags, BTUSB_BOOTING);
+                       }
+               }
+       }
+
+       return hci_recv_frame(hdev, skb);
+}
+
+static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct btusb_data *data = hci_get_drvdata(hdev);
+       struct urb *urb;
+
+       BT_DBG("%s", hdev->name);
+
+       if (!test_bit(HCI_RUNNING, &hdev->flags))
+               return -EBUSY;
+
+       switch (bt_cb(skb)->pkt_type) {
+       case HCI_COMMAND_PKT:
+               if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       struct hci_command_hdr *cmd = (void *)skb->data;
+                       __u16 opcode = le16_to_cpu(cmd->opcode);
+
+                       /* When in bootloader mode and the command 0xfc09
+                        * is received, it needs to be send down the
+                        * bulk endpoint. So allocate a bulk URB instead.
+                        */
+                       if (opcode == 0xfc09)
+                               urb = alloc_bulk_urb(hdev, skb);
+                       else
+                               urb = alloc_ctrl_urb(hdev, skb);
+
+                       /* When the 0xfc01 command is issued to boot into
+                        * the operational firmware, it will actually not
+                        * send a command complete event. To keep the flow
+                        * control working inject that event here.
+                        */
+                       if (opcode == 0xfc01)
+                               inject_cmd_complete(hdev, opcode);
+               } else {
+                       urb = alloc_ctrl_urb(hdev, skb);
+               }
+               if (IS_ERR(urb))
+                       return PTR_ERR(urb);
+
+               hdev->stat.cmd_tx++;
+               return submit_or_queue_tx_urb(hdev, urb);
+
+       case HCI_ACLDATA_PKT:
+               urb = alloc_bulk_urb(hdev, skb);
+               if (IS_ERR(urb))
+                       return PTR_ERR(urb);
+
+               hdev->stat.acl_tx++;
+               return submit_or_queue_tx_urb(hdev, urb);
+
+       case HCI_SCODATA_PKT:
+               if (hci_conn_num(hdev, SCO_LINK) < 1)
+                       return -ENODEV;
+
+               urb = alloc_isoc_urb(hdev, skb);
+               if (IS_ERR(urb))
+                       return PTR_ERR(urb);
+
+               hdev->stat.sco_tx++;
+               return submit_tx_urb(hdev, urb);
+       }
+
+       return -EILSEQ;
+}
+
+static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+                                  u32 plen, const void *param)
+{
+       while (plen > 0) {
+               struct sk_buff *skb;
+               u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
+
+               cmd_param[0] = fragment_type;
+               memcpy(cmd_param + 1, param, fragment_len);
+
+               skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+                                    cmd_param, HCI_INIT_TIMEOUT);
+               if (IS_ERR(skb))
+                       return PTR_ERR(skb);
+
+               kfree_skb(skb);
+
+               plen -= fragment_len;
+               param += fragment_len;
+       }
+
+       return 0;
+}
+
+static void btusb_intel_version_info(struct hci_dev *hdev,
+                                    struct intel_version *ver)
+{
+       const char *variant;
+
+       switch (ver->fw_variant) {
+       case 0x06:
+               variant = "Bootloader";
+               break;
+       case 0x23:
+               variant = "Firmware";
+               break;
+       default:
+               return;
+       }
+
+       BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
+               variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+               ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+}
+
+static int btusb_setup_intel_new(struct hci_dev *hdev)
+{
+       static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+                                         0x00, 0x08, 0x04, 0x00 };
+       struct btusb_data *data = hci_get_drvdata(hdev);
+       struct sk_buff *skb;
+       struct intel_version *ver;
+       struct intel_boot_params *params;
+       const struct firmware *fw;
+       const u8 *fw_ptr;
+       char fwname[64];
+       ktime_t calltime, delta, rettime;
+       unsigned long long duration;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       calltime = ktime_get();
+
+       /* Read the Intel version information to determine if the device
+        * is in bootloader mode or if it already has operational firmware
+        * loaded.
+        */
+       skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reading Intel version information failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*ver)) {
+               BT_ERR("%s: Intel version event size mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EILSEQ;
+       }
+
+       ver = (struct intel_version *)skb->data;
+       if (ver->status) {
+               BT_ERR("%s: Intel version command failure (%02x)",
+                      hdev->name, ver->status);
+               err = -bt_to_errno(ver->status);
+               kfree_skb(skb);
+               return err;
+       }
+
+       /* The hardware platform number has a fixed value of 0x37 and
+        * for now only accept this single value.
+        */
+       if (ver->hw_platform != 0x37) {
+               BT_ERR("%s: Unsupported Intel hardware platform (%u)",
+                      hdev->name, ver->hw_platform);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+        * supported by this firmware loading method. This check has been
+        * put in place to ensure correct forward compatibility options
+        * when newer hardware variants come along.
+        */
+       if (ver->hw_variant != 0x0b) {
+               BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+                      hdev->name, ver->hw_variant);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       btusb_intel_version_info(hdev, ver);
+
+       /* The firmware variant determines if the device is in bootloader
+        * mode or is running operational firmware. The value 0x06 identifies
+        * the bootloader and the value 0x23 identifies the operational
+        * firmware.
+        *
+        * When the operational firmware is already present, then only
+        * the check for valid Bluetooth device address is needed. This
+        * determines if the device will be added as configured or
+        * unconfigured controller.
+        *
+        * It is not possible to use the Secure Boot Parameters in this
+        * case since that command is only available in bootloader mode.
+        */
+       if (ver->fw_variant == 0x23) {
+               kfree_skb(skb);
+               clear_bit(BTUSB_BOOTLOADER, &data->flags);
+               btusb_check_bdaddr_intel(hdev);
+               return 0;
+       }
+
+       /* If the device is not in bootloader mode, then the only possible
+        * choice is to return an error and abort the device initialization.
+        */
+       if (ver->fw_variant != 0x06) {
+               BT_ERR("%s: Unsupported Intel firmware variant (%u)",
+                      hdev->name, ver->fw_variant);
+               kfree_skb(skb);
+               return -ENODEV;
+       }
+
+       kfree_skb(skb);
+
+       /* Read the secure boot parameters to identify the operating
+        * details of the bootloader.
+        */
+       skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*params)) {
+               BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EILSEQ;
+       }
+
+       params = (struct intel_boot_params *)skb->data;
+       if (params->status) {
+               BT_ERR("%s: Intel boot parameters command failure (%02x)",
+                      hdev->name, params->status);
+               err = -bt_to_errno(params->status);
+               kfree_skb(skb);
+               return err;
+       }
+
+       BT_INFO("%s: Device revision is %u", hdev->name,
+               le16_to_cpu(params->dev_revid));
+
+       BT_INFO("%s: Secure boot is %s", hdev->name,
+               params->secure_boot ? "enabled" : "disabled");
+
+       BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+               params->min_fw_build_nn, params->min_fw_build_cw,
+               2000 + params->min_fw_build_yy);
+
+       /* It is required that every single firmware fragment is acknowledged
+        * with a command complete event. If the boot parameters indicate
+        * that this bootloader does not send them, then abort the setup.
+        */
+       if (params->limited_cce != 0x00) {
+               BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
+                      hdev->name, params->limited_cce);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       /* If the OTP has no valid Bluetooth device address, then there will
+        * also be no valid address for the operational firmware.
+        */
+       if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+               BT_INFO("%s: No device address configured", hdev->name);
+               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+       }
+
+       /* With this Intel bootloader only the hardware variant and device
+        * revision information are used to select the right firmware.
+        *
+        * Currently this bootloader support is limited to hardware variant
+        * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+        */
+       snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
+                le16_to_cpu(params->dev_revid));
+
+       err = request_firmware(&fw, fwname, &hdev->dev);
+       if (err < 0) {
+               BT_ERR("%s: Failed to load Intel firmware file (%d)",
+                      hdev->name, err);
+               kfree_skb(skb);
+               return err;
+       }
+
+       BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+       kfree_skb(skb);
+
+       if (fw->size < 644) {
+               BT_ERR("%s: Invalid size of firmware file (%zu)",
+                      hdev->name, fw->size);
+               err = -EBADF;
+               goto done;
+       }
+
+       set_bit(BTUSB_DOWNLOADING, &data->flags);
+
+       /* Start the firmware download transaction with the Init fragment
+        * represented by the 128 bytes of CSS header.
+        */
+       err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware header (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       /* Send the 256 bytes of public key information from the firmware
+        * as the PKey fragment.
+        */
+       err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware public key (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       /* Send the 256 bytes of signature information from the firmware
+        * as the Sign fragment.
+        */
+       err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware signature (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       fw_ptr = fw->data + 644;
+
+       while (fw_ptr - fw->data < fw->size) {
+               struct hci_command_hdr *cmd = (void *)fw_ptr;
+               u8 cmd_len;
+
+               cmd_len = sizeof(*cmd) + cmd->plen;
+
+               /* Send each command from the firmware data buffer as
+                * a single Data fragment.
+                */
+               err = btusb_intel_secure_send(hdev, 0x01, cmd_len, fw_ptr);
+               if (err < 0) {
+                       BT_ERR("%s: Failed to send firmware data (%d)",
+                              hdev->name, err);
+                       goto done;
+               }
+
+               fw_ptr += cmd_len;
+       }
+
+       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+
+       BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+       /* Before switching the device into operational mode and with that
+        * booting the loaded firmware, wait for the bootloader notification
+        * that all fragments have been successfully received.
+        *
+        * When the event processing receives the notification, then the
+        * BTUSB_DOWNLOADING flag will be cleared.
+        *
+        * The firmware loading should not take longer than 5 seconds
+        * and thus just timeout if that happens and fail the setup
+        * of this device.
+        */
+       err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
+                                       msecs_to_jiffies(5000),
+                                       TASK_INTERRUPTIBLE);
+       if (err == 1) {
+               BT_ERR("%s: Firmware loading interrupted", hdev->name);
+               err = -EINTR;
+               goto done;
+       }
+
+       if (err) {
+               BT_ERR("%s: Firmware loading timeout", hdev->name);
+               err = -ETIMEDOUT;
+               goto done;
+       }
+
+       if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
+               BT_ERR("%s: Firmware loading failed", hdev->name);
+               err = -ENOEXEC;
+               goto done;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+
+done:
+       release_firmware(fw);
+
+       if (err < 0)
+               return err;
+
+       calltime = ktime_get();
+
+       set_bit(BTUSB_BOOTING, &data->flags);
+
+       skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       kfree_skb(skb);
+
+       /* The bootloader will not indicate when the device is ready. This
+        * is done by the operational firmware sending bootup notification.
+        *
+        * Booting into operational firmware should not take longer than
+        * 1 second. However if that happens, then just fail the setup
+        * since something went wrong.
+        */
+       BT_INFO("%s: Waiting for device to boot", hdev->name);
+
+       err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
+                                       msecs_to_jiffies(1000),
+                                       TASK_INTERRUPTIBLE);
+
+       if (err == 1) {
+               BT_ERR("%s: Device boot interrupted", hdev->name);
+               return -EINTR;
+       }
+
+       if (err) {
+               BT_ERR("%s: Device boot timeout", hdev->name);
+               return -ETIMEDOUT;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+
+       clear_bit(BTUSB_BOOTLOADER, &data->flags);
+
+       return 0;
+}
+
+static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
+{
+       struct sk_buff *skb;
+       u8 type = 0x00;
+
+       BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reset after hardware error failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return;
+       }
+       kfree_skb(skb);
+
+       skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return;
+       }
+
+       if (skb->len != 13) {
+               BT_ERR("%s: Exception info size mismatch", hdev->name);
+               kfree_skb(skb);
+               return;
+       }
+
+       if (skb->data[0] != 0x00) {
+               BT_ERR("%s: Exception info command failure (%02x)",
+                      hdev->name, skb->data[0]);
+               kfree_skb(skb);
+               return;
+       }
+
+       BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+
+       kfree_skb(skb);
+}
+
 static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 {
        struct sk_buff *skb;
@@ -2029,7 +2644,13 @@ static int btusb_probe(struct usb_interface *intf,
        if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
                return -ENODEV;
 
-       data->cmdreq_type = USB_TYPE_CLASS;
+       if (id->driver_info & BTUSB_AMP) {
+               data->cmdreq_type = USB_TYPE_CLASS | 0x01;
+               data->cmdreq = 0x2b;
+       } else {
+               data->cmdreq_type = USB_TYPE_CLASS;
+               data->cmdreq = 0x00;
+       }
 
        data->udev = interface_to_usbdev(intf);
        data->intf = intf;
@@ -2045,7 +2666,14 @@ static int btusb_probe(struct usb_interface *intf,
        init_usb_anchor(&data->isoc_anchor);
        spin_lock_init(&data->rxlock);
 
-       data->recv_bulk = btusb_recv_bulk;
+       if (id->driver_info & BTUSB_INTEL_NEW) {
+               data->recv_event = btusb_recv_event_intel;
+               data->recv_bulk = btusb_recv_bulk_intel;
+               set_bit(BTUSB_BOOTLOADER, &data->flags);
+       } else {
+               data->recv_event = hci_recv_frame;
+               data->recv_bulk = btusb_recv_bulk;
+       }
 
        hdev = hci_alloc_dev();
        if (!hdev)
@@ -2054,6 +2682,11 @@ static int btusb_probe(struct usb_interface *intf,
        hdev->bus = HCI_USB;
        hci_set_drvdata(hdev, data);
 
+       if (id->driver_info & BTUSB_AMP)
+               hdev->dev_type = HCI_AMP;
+       else
+               hdev->dev_type = HCI_BREDR;
+
        data->hdev = hdev;
 
        SET_HCIDEV_DEV(hdev, &intf->dev);
@@ -2076,22 +2709,40 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_INTEL) {
                hdev->setup = btusb_setup_intel;
                hdev->set_bdaddr = btusb_set_bdaddr_intel;
+               set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+       }
+
+       if (id->driver_info & BTUSB_INTEL_NEW) {
+               hdev->send = btusb_send_frame_intel;
+               hdev->setup = btusb_setup_intel_new;
+               hdev->hw_error = btusb_hw_error_intel;
+               hdev->set_bdaddr = btusb_set_bdaddr_intel;
+               set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
        }
 
        if (id->driver_info & BTUSB_MARVELL)
                hdev->set_bdaddr = btusb_set_bdaddr_marvell;
 
-       if (id->driver_info & BTUSB_AVM)
+       if (id->driver_info & BTUSB_SWAVE) {
+               set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
                set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+       }
 
        if (id->driver_info & BTUSB_INTEL_BOOT)
                set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
 
-       if (id->driver_info & BTUSB_ATH3012)
+       if (id->driver_info & BTUSB_ATH3012) {
                hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+               set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+       }
 
-       /* Interface numbers are hardcoded in the specification */
-       data->isoc = usb_ifnum_to_if(data->udev, 1);
+       if (id->driver_info & BTUSB_AMP) {
+               /* AMP controllers do not support SCO packets */
+               data->isoc = NULL;
+       } else {
+               /* Interface numbers are hardcoded in the specification */
+               data->isoc = usb_ifnum_to_if(data->udev, 1);
+       }
 
        if (!reset)
                set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
@@ -2185,7 +2836,6 @@ static void btusb_disconnect(struct usb_interface *intf)
        else if (data->isoc)
                usb_driver_release_interface(&btusb_driver, data->isoc);
 
-       btusb_free_frags(data);
        hci_free_dev(hdev);
 }
 
index 860da40b78effb96b16a779f84f9ea21d05aba95..0ce5e2d65a06b5d4e6ecbdf3390554f08751d9e2 100644 (file)
@@ -1312,6 +1312,9 @@ static int cci_probe(void)
        if (!np)
                return -ENODEV;
 
+       if (!of_device_is_available(np))
+               return -ENODEV;
+
        cci_config = of_match_node(arm_cci_matches, np)->data;
        if (!cci_config)
                return -ENODEV;
index eb7682dc123be4ef22d3b0a05e305b5aa7b11045..81bf297f1034abd697135cea8c5c1db3c932cbff 100644 (file)
@@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
 }
 
 /* Checks whether the given window number is available */
+
+/* On Armada XP, 375 and 38x the MBus window 13 has the remap
+ * capability, like windows 0 to 7. However, the mvebu-mbus driver
+ * isn't currently taking into account this special case, which means
+ * that when window 13 is actually used, the remap registers are left
+ * to 0, making the device using this MBus window unavailable. The
+ * quick fix for stable is to not use window 13. A follow up patch
+ * will correctly handle this window.
+*/
 static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
                                     const int win)
 {
        void __iomem *addr = mbus->mbuswins_base +
                mbus->soc->win_cfg_offset(win);
        u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+       if (win == 13)
+               return false;
+
        return !(ctrl & WIN_CTRL_ENABLE);
 }
 
index fd5a5e85d7dc604e2ebe237ee34ce9355d81d866..982b96323f823b8402ede2ceec7c0cb85c042ec4 100644 (file)
@@ -969,7 +969,8 @@ static void sender(void                *send_info,
 
                do_gettimeofday(&t);
                pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
-                      msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
+                      msg->data[0], msg->data[1],
+                      (long) t.tv_sec, (long) t.tv_usec);
        }
 }
 
index 32f7c1b36204018d0ce151601c6ca5ef6f2cf75f..2f13bd5246b5563ec399058029fcd0740c58c3d2 100644 (file)
@@ -70,6 +70,7 @@ struct clk_sam9x5_slow {
 
 #define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
 
+static struct clk *slow_clk;
 
 static int clk_slow_osc_prepare(struct clk_hw *hw)
 {
@@ -357,6 +358,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
        clk = clk_register(NULL, &slowck->hw);
        if (IS_ERR(clk))
                kfree(slowck);
+       else
+               slow_clk = clk;
 
        return clk;
 }
@@ -433,6 +436,8 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
        clk = clk_register(NULL, &slowck->hw);
        if (IS_ERR(clk))
                kfree(slowck);
+       else
+               slow_clk = clk;
 
        return clk;
 }
@@ -465,3 +470,25 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
 
        of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
+
+/*
+ * FIXME: All slow clk users are not properly claiming it (get + prepare +
+ * enable) before using it.
+ * If all users properly claiming this clock decide that they don't need it
+ * anymore (or are removed), it is disabled while faulty users are still
+ * requiring it, and the system hangs.
+ * Prevent this clock from being disabled until all users are properly
+ * requesting it.
+ * Once this is done we should remove this function and the slow_clk variable.
+ */
+static int __init of_at91_clk_slow_retain(void)
+{
+       if (!slow_clk)
+               return 0;
+
+       __clk_get(slow_clk);
+       clk_prepare_enable(slow_clk);
+
+       return 0;
+}
+arch_initcall(of_at91_clk_slow_retain);
index 21784e4eb3f004af06c1b980938ab4ced9bc2894..440ef81ab15c4ba8d9f70947db7e5a0d144a97a4 100644 (file)
@@ -285,7 +285,6 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = {
        { "pbridge",    "perif",        15, CLK_IGNORE_UNUSED },
        { "sdio",       "perif",        16, CLK_IGNORE_UNUSED },
        { "nfc",        "perif",        18 },
-       { "smemc",      "perif",        19 },
        { "pcie",       "perif",        22 },
 };
 
index b6e6c85507a5a7706c8c69f2c6611d908ade8611..0a47d6f49cd6f347eca03eadf283722c737c7fb2 100644 (file)
@@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
        {}
 };
 
-static struct platform_driver ppc_corenet_clk_driver __initdata = {
+static struct platform_driver ppc_corenet_clk_driver = {
        .driver = {
                .name = "ppc_corenet_clock",
                .of_match_table = ppc_clk_ids,
index f4963b7d4e17d41b6a6553854c5250a7e90bfdef..d48ac71c6c8b173793a31e95ebaa93749e883a76 100644 (file)
@@ -1366,7 +1366,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
                new_rate = clk->ops->determine_rate(clk->hw, rate,
                                                    &best_parent_rate,
                                                    &parent_hw);
-               parent = parent_hw->clk;
+               parent = parent_hw ? parent_hw->clk : NULL;
        } else if (clk->ops->round_rate) {
                new_rate = clk->ops->round_rate(clk->hw, rate,
                                                &best_parent_rate);
index 75c8c45ef72849358e4b7bf2c3bbff7927761ae0..8539c4fd34cc37bd28810b93d6ffb815b0bd48d6 100644 (file)
@@ -124,10 +124,11 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
 {
        const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
        unsigned long alt_prate, alt_div;
+       unsigned long flags;
 
        alt_prate = clk_get_rate(cpuclk->alt_parent);
 
-       spin_lock(cpuclk->lock);
+       spin_lock_irqsave(cpuclk->lock, flags);
 
        /*
         * If the old parent clock speed is less than the clock speed
@@ -164,7 +165,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
                        cpuclk->reg_base + reg_data->core_reg);
        }
 
-       spin_unlock(cpuclk->lock);
+       spin_unlock_irqrestore(cpuclk->lock, flags);
        return 0;
 }
 
@@ -173,6 +174,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
 {
        const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
        const struct rockchip_cpuclk_rate_table *rate;
+       unsigned long flags;
 
        rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
        if (!rate) {
@@ -181,7 +183,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
                return -EINVAL;
        }
 
-       spin_lock(cpuclk->lock);
+       spin_lock_irqsave(cpuclk->lock, flags);
 
        if (ndata->old_rate < ndata->new_rate)
                rockchip_cpuclk_set_dividers(cpuclk, rate);
@@ -201,7 +203,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
        if (ndata->old_rate > ndata->new_rate)
                rockchip_cpuclk_set_dividers(cpuclk, rate);
 
-       spin_unlock(cpuclk->lock);
+       spin_unlock_irqrestore(cpuclk->lock, flags);
        return 0;
 }
 
index c54078960847c91f6f499ecb8f26924338bb2ab4..7eb684c50d42ce9f0d06ffa4de1ecef4270808b7 100644 (file)
@@ -210,6 +210,17 @@ PNAME(mux_sclk_hsadc_p)            = { "hsadc_src", "hsadc_frac", "ext_hsadc" };
 PNAME(mux_mac_p)               = { "gpll", "dpll" };
 PNAME(mux_sclk_macref_p)       = { "mac_src", "ext_rmii" };
 
+static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = {
+       [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
+                    RK2928_MODE_CON, 0, 5, 0, rk3188_pll_rates),
+       [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
+                    RK2928_MODE_CON, 4, 4, 0, NULL),
+       [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
+                    RK2928_MODE_CON, 8, 6, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+       [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
+                    RK2928_MODE_CON, 12, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+};
+
 static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
        [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
                     RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
@@ -427,11 +438,11 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        /* hclk_peri gates */
        GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
        GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
-       GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
+       GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 7, GFLAGS),
        GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
        GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
-       GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS),
-       GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+       GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 5, GFLAGS),
+       GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS),
        GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS),
        GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS),
        GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS),
@@ -592,7 +603,8 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
        GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
        GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
 
-       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS),
+       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+                       RK2928_CLKGATE_CON(5), 14, GFLAGS),
 
        GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
 
@@ -680,7 +692,8 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
        GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
        GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
 
-       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+                       RK2928_CLKGATE_CON(7), 3, GFLAGS),
        GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
 
        GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
@@ -735,8 +748,8 @@ static void __init rk3188_common_clk_init(struct device_node *np)
 static void __init rk3066a_clk_init(struct device_node *np)
 {
        rk3188_common_clk_init(np);
-       rockchip_clk_register_plls(rk3188_pll_clks,
-                                  ARRAY_SIZE(rk3188_pll_clks),
+       rockchip_clk_register_plls(rk3066_pll_clks,
+                                  ARRAY_SIZE(rk3066_pll_clks),
                                   RK3066_GRF_SOC_STATUS);
        rockchip_clk_register_branches(rk3066a_clk_branches,
                                  ARRAY_SIZE(rk3066a_clk_branches));
index 40d267f5dea322b7a1e1279f48f2660d174e28e6..cbcddcc02475233f53b2d150f8395a99c3b4ba4d 100644 (file)
@@ -145,20 +145,20 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
        }
 
 static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = {
-       RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4),
+       RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3),
 };
 
 static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
index 6a79fc4f900c4b56b4bd351cf050aa5c2b67175e..a3025e7ae35f1741883d610310b915040d633101 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
@@ -462,7 +463,7 @@ static void __init arch_counter_register(unsigned type)
 
        /* Register the CP15 based counter if we have one */
        if (type & ARCH_CP15_TIMER) {
-               if (arch_timer_use_virtual)
+               if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
                        arch_timer_read_counter = arch_counter_get_cntvct;
                else
                        arch_timer_read_counter = arch_counter_get_cntpct;
index 0595dc6c453e6ee4a97cfb4dd30865f76c351366..f1e33d08dd834a27269062a0b42d8265fb39960c 100644 (file)
@@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base)
 }
 
 static void
-kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
+kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 {
-       void __iomem *base = IOMEM(timer_base);
        int loop_limit = 4;
 
        /*
@@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
         */
 
        while (--loop_limit) {
-               *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET);
-               *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET);
-               if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET))
+               *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
+               *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
+               if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
                        break;
        }
        if (!loop_limit) {
index 9403061a2acc78397dd686208642c71012b328e7..83564c9cfdbe3b18dfb07a3799b73b991ffe00c7 100644 (file)
@@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
        writel_relaxed(value, reg_base + offset);
 
        if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
-               stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
-               switch (offset & EXYNOS4_MCT_L_MASK) {
+               stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+               switch (offset & ~EXYNOS4_MCT_L_MASK) {
                case MCT_L_TCON_OFFSET:
                        mask = 1 << 3;          /* L_TCON write status */
                        break;
index 0f665b8f2461f00ee58bc38918c2b684b04e16d3..f150ca82bfaf106a7ef2c5a40dd12a1e098e39f0 100644 (file)
@@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = 200;
-       ced->cpumask = cpumask_of(0);
+       ced->cpumask = cpu_possible_mask;
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_mode = sh_tmu_clock_event_mode;
        ced->suspend = sh_tmu_clock_event_suspend;
index 380478562b7d3187d42a64c221caf5714e6e59ec..5c062548957c3183fba608e13354d204c0a4b40c 100644 (file)
@@ -1505,7 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        dw->regs = chip->regs;
        chip->dw = dw;
 
-       pm_runtime_enable(chip->dev);
        pm_runtime_get_sync(chip->dev);
 
        dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
@@ -1703,7 +1702,6 @@ int dw_dma_remove(struct dw_dma_chip *chip)
        }
 
        pm_runtime_put_sync_suspend(chip->dev);
-       pm_runtime_disable(chip->dev);
        return 0;
 }
 EXPORT_SYMBOL_GPL(dw_dma_remove);
index a630161473a4fa69c2586a257949d7f0956d4db7..32ea1aca7a0ea27dc28ddd58c20281f482f53c45 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/clk.h>
+#include <linux/pm_runtime.h>
 #include <linux/platform_device.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -185,6 +186,8 @@ static int dw_probe(struct platform_device *pdev)
        if (err)
                return err;
 
+       pm_runtime_enable(&pdev->dev);
+
        err = dw_dma_probe(chip, pdata);
        if (err)
                goto err_dw_dma_probe;
@@ -205,6 +208,7 @@ static int dw_probe(struct platform_device *pdev)
        return 0;
 
 err_dw_dma_probe:
+       pm_runtime_disable(&pdev->dev);
        clk_disable_unprepare(chip->clk);
        return err;
 }
@@ -217,6 +221,7 @@ static int dw_remove(struct platform_device *pdev)
                of_dma_controller_free(pdev->dev.of_node);
 
        dw_dma_remove(chip);
+       pm_runtime_disable(&pdev->dev);
        clk_disable_unprepare(chip->clk);
 
        return 0;
index 55d4803d71b0db4c3a27b909bfeb20d4d3b668ed..3d9e08f7e823edf13aa719460ecc13f8327c8b31 100644 (file)
@@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
        for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
                if (pending & BIT(gpio)) {
                        virq = irq_find_mapping(cg->chip.irqdomain, gpio);
-                       generic_handle_irq(virq);
+                       handle_nested_irq(virq);
                }
        }
 
index 978b51eae2ec61bbba18db37cf05b8e93db2037e..ce3c1558cb0a6f6cfa5a818736da54ff8fc5ed07 100644 (file)
 
 #define DLN2_GPIO_MAX_PINS 32
 
-struct dln2_irq_work {
-       struct work_struct work;
-       struct dln2_gpio *dln2;
-       int pin;
-       int type;
-};
-
 struct dln2_gpio {
        struct platform_device *pdev;
        struct gpio_chip gpio;
@@ -64,10 +57,12 @@ struct dln2_gpio {
         */
        DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
 
-       DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
-       DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
-       DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
-       struct dln2_irq_work *irq_work;
+       /* active IRQs - not synced to hardware */
+       DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
+       /* active IRQS - synced to hardware */
+       DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
+       int irq_type[DLN2_GPIO_MAX_PINS];
+       struct mutex irq_lock;
 };
 
 struct dln2_gpio_pin {
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
        return !!ret;
 }
 
-static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
-                                     unsigned int pin, int value)
+static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
+                                    unsigned int pin, int value)
 {
        struct dln2_gpio_pin_val req = {
                .pin = cpu_to_le16(pin),
                .value = value,
        };
 
-       dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
-                        sizeof(req));
+       return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
+                               sizeof(req));
 }
 
 #define DLN2_GPIO_DIRECTION_IN         0
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
                                      int value)
 {
+       struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
+       int ret;
+
+       ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
+       if (ret < 0)
+               return ret;
+
        return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
 }
 
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
                                &req, sizeof(req));
 }
 
-static void dln2_irq_work(struct work_struct *w)
-{
-       struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
-       struct dln2_gpio *dln2 = iw->dln2;
-       u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
-
-       if (test_bit(iw->pin, dln2->irqs_enabled))
-               dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
-       else
-               dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
-}
-
-static void dln2_irq_enable(struct irq_data *irqd)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
-       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
-       int pin = irqd_to_hwirq(irqd);
-
-       set_bit(pin, dln2->irqs_enabled);
-       schedule_work(&dln2->irq_work[pin].work);
-}
-
-static void dln2_irq_disable(struct irq_data *irqd)
+static void dln2_irq_unmask(struct irq_data *irqd)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
        struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
        int pin = irqd_to_hwirq(irqd);
 
-       clear_bit(pin, dln2->irqs_enabled);
-       schedule_work(&dln2->irq_work[pin].work);
+       set_bit(pin, dln2->unmasked_irqs);
 }
 
 static void dln2_irq_mask(struct irq_data *irqd)
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
        struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
        int pin = irqd_to_hwirq(irqd);
 
-       set_bit(pin, dln2->irqs_masked);
-}
-
-static void dln2_irq_unmask(struct irq_data *irqd)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
-       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
-       struct device *dev = dln2->gpio.dev;
-       int pin = irqd_to_hwirq(irqd);
-
-       if (test_and_clear_bit(pin, dln2->irqs_pending)) {
-               int irq;
-
-               irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
-               if (!irq) {
-                       dev_err(dev, "pin %d not mapped to IRQ\n", pin);
-                       return;
-               }
-
-               generic_handle_irq(irq);
-       }
+       clear_bit(pin, dln2->unmasked_irqs);
 }
 
 static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
 
        switch (type) {
        case IRQ_TYPE_LEVEL_HIGH:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
                break;
        case IRQ_TYPE_LEVEL_LOW:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
                break;
        case IRQ_TYPE_EDGE_BOTH:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
                break;
        case IRQ_TYPE_EDGE_RISING:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
                break;
        case IRQ_TYPE_EDGE_FALLING:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
                break;
        default:
                return -EINVAL;
@@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
        return 0;
 }
 
+static void dln2_irq_bus_lock(struct irq_data *irqd)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+
+       mutex_lock(&dln2->irq_lock);
+}
+
+static void dln2_irq_bus_unlock(struct irq_data *irqd)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+       int pin = irqd_to_hwirq(irqd);
+       int enabled, unmasked;
+       unsigned type;
+       int ret;
+
+       enabled = test_bit(pin, dln2->enabled_irqs);
+       unmasked = test_bit(pin, dln2->unmasked_irqs);
+
+       if (enabled != unmasked) {
+               if (unmasked) {
+                       type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
+                       set_bit(pin, dln2->enabled_irqs);
+               } else {
+                       type = DLN2_GPIO_EVENT_NONE;
+                       clear_bit(pin, dln2->enabled_irqs);
+               }
+
+               ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
+               if (ret)
+                       dev_err(dln2->gpio.dev, "failed to set event\n");
+       }
+
+       mutex_unlock(&dln2->irq_lock);
+}
+
 static struct irq_chip dln2_gpio_irqchip = {
        .name = "dln2-irq",
-       .irq_enable = dln2_irq_enable,
-       .irq_disable = dln2_irq_disable,
        .irq_mask = dln2_irq_mask,
        .irq_unmask = dln2_irq_unmask,
        .irq_set_type = dln2_irq_set_type,
+       .irq_bus_lock = dln2_irq_bus_lock,
+       .irq_bus_sync_unlock = dln2_irq_bus_unlock,
 };
 
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
@@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                return;
        }
 
-       if (!test_bit(pin, dln2->irqs_enabled))
-               return;
-       if (test_bit(pin, dln2->irqs_masked)) {
-               set_bit(pin, dln2->irqs_pending);
-               return;
-       }
-
-       switch (dln2->irq_work[pin].type) {
+       switch (dln2->irq_type[pin]) {
        case DLN2_GPIO_EVENT_CHANGE_RISING:
                if (event->value)
                        generic_handle_irq(irq);
@@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        struct dln2_gpio *dln2;
        struct device *dev = &pdev->dev;
        int pins;
-       int i, ret;
+       int ret;
 
        pins = dln2_gpio_get_pin_count(pdev);
        if (pins < 0) {
@@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        if (!dln2)
                return -ENOMEM;
 
-       dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
-                                     sizeof(struct dln2_irq_work), GFP_KERNEL);
-       if (!dln2->irq_work)
-               return -ENOMEM;
-       for (i = 0; i < pins; i++) {
-               INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
-               dln2->irq_work[i].pin = i;
-               dln2->irq_work[i].dln2 = dln2;
-       }
+       mutex_init(&dln2->irq_lock);
 
        dln2->pdev = pdev;
 
@@ -529,11 +510,8 @@ out:
 static int dln2_gpio_remove(struct platform_device *pdev)
 {
        struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
-       int i;
 
        dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
-       for (i = 0; i < dln2->gpio.ngpio; i++)
-               flush_work(&dln2->irq_work[i].work);
        gpiochip_remove(&dln2->gpio);
 
        return 0;
index 09daaf2aeb563d982c71807b8155df4b2c50a6c5..3a5a71050559c7c52964a5b55764ef9b16e82361 100644 (file)
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
        err = gpiochip_add(gc);
        if (err) {
                dev_err(&ofdev->dev, "Could not add gpiochip\n");
-               irq_domain_remove(priv->domain);
+               if (priv->domain)
+                       irq_domain_remove(priv->domain);
                return err;
        }
 
index da9c316059bc876ba459832ca7ce01513c389897..eea5d7e578c994bd28b04271837d06fe3fee3d69 100644 (file)
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client,
                client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
        } else {
                pdata = dev_get_platdata(&client->dev);
-               if (!pdata || !gpio_is_valid(pdata->base)) {
-                       dev_dbg(&client->dev, "invalid platform data\n");
-                       return -EINVAL;
+               if (!pdata) {
+                       pdata = devm_kzalloc(&client->dev,
+                                       sizeof(struct mcp23s08_platform_data),
+                                       GFP_KERNEL);
+                       pdata->base = -1;
                }
        }
 
@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi)
        } else {
                type = spi_get_device_id(spi)->driver_data;
                pdata = dev_get_platdata(&spi->dev);
-               if (!pdata || !gpio_is_valid(pdata->base)) {
-                       dev_dbg(&spi->dev,
-                                       "invalid or missing platform data\n");
-                       return -EINVAL;
+               if (!pdata) {
+                       pdata = devm_kzalloc(&spi->dev,
+                                       sizeof(struct mcp23s08_platform_data),
+                                       GFP_KERNEL);
+                       pdata->base = -1;
                }
 
                for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
index 30646cfe0efa91e2378fa0e5871dec14cd7ece7d..f476ae2eb0b3c8610e54377cf7e3010079e916bf 100644 (file)
@@ -88,6 +88,8 @@ struct gpio_bank {
 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
 #define LINE_USED(line, offset) (line & (BIT(offset)))
 
+static void omap_gpio_unmask_irq(struct irq_data *d);
+
 static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
 {
        return bank->chip.base + gpio_irq;
@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
        return readl_relaxed(reg) & mask;
 }
 
+static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
+                              unsigned offset)
+{
+       if (!LINE_USED(bank->mod_usage, offset)) {
+               omap_enable_gpio_module(bank, offset);
+               omap_set_gpio_direction(bank, offset, 1);
+       }
+       bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+}
+
 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 {
        struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
        spin_lock_irqsave(&bank->lock, flags);
        offset = GPIO_INDEX(bank, gpio);
        retval = omap_set_gpio_triggering(bank, offset, type);
-       if (!LINE_USED(bank->mod_usage, offset)) {
-               omap_enable_gpio_module(bank, offset);
-               omap_set_gpio_direction(bank, offset, 1);
-       } else if (!omap_gpio_is_input(bank, BIT(offset))) {
+       omap_gpio_init_irq(bank, gpio, offset);
+       if (!omap_gpio_is_input(bank, BIT(offset))) {
                spin_unlock_irqrestore(&bank->lock, flags);
                return -EINVAL;
        }
-
-       bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
        spin_unlock_irqrestore(&bank->lock, flags);
 
        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -792,6 +800,24 @@ exit:
        pm_runtime_put(bank->dev);
 }
 
+static unsigned int omap_gpio_irq_startup(struct irq_data *d)
+{
+       struct gpio_bank *bank = omap_irq_data_get_bank(d);
+       unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+       unsigned long flags;
+       unsigned offset = GPIO_INDEX(bank, gpio);
+
+       if (!BANK_USED(bank))
+               pm_runtime_get_sync(bank->dev);
+
+       spin_lock_irqsave(&bank->lock, flags);
+       omap_gpio_init_irq(bank, gpio, offset);
+       spin_unlock_irqrestore(&bank->lock, flags);
+       omap_gpio_unmask_irq(d);
+
+       return 0;
+}
+
 static void omap_gpio_irq_shutdown(struct irq_data *d)
 {
        struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
        if (!irqc)
                return -ENOMEM;
 
+       irqc->irq_startup = omap_gpio_irq_startup,
        irqc->irq_shutdown = omap_gpio_irq_shutdown,
        irqc->irq_ack = omap_gpio_ack_irq,
        irqc->irq_mask = omap_gpio_mask_irq,
index 604dbe60bdee1abdddb8d706947391987628ab9e..08261f2b3a82afed1ed6c2bdcfcf51804ddbb59d 100644 (file)
@@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
                return false;
 
        ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
-       if (ret < 0)
-               return false;
+       if (ret < 0) {
+               /* We've found the gpio chip, but the translation failed.
+                * Return true to stop looking and return the translation
+                * error via out_gpio
+                */
+               gg_data->out_gpio = ERR_PTR(ret);
+               return true;
+        }
 
        gg_data->out_gpio = gpiochip_get_desc(gc, ret);
        return true;
index 2ac1800b58bb7052032ecd23d3acf270a1deb132..7722ed53bd651faae15692621d099551ef9bf308 100644 (file)
@@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev,
        return status;
 }
 
-static const DEVICE_ATTR(value, 0644,
+static DEVICE_ATTR(value, 0644,
                gpio_value_show, gpio_value_store);
 
 static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -353,17 +353,46 @@ static ssize_t gpio_active_low_store(struct device *dev,
        return status ? : size;
 }
 
-static const DEVICE_ATTR(active_low, 0644,
+static DEVICE_ATTR(active_low, 0644,
                gpio_active_low_show, gpio_active_low_store);
 
-static const struct attribute *gpio_attrs[] = {
+static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
+                              int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct gpio_desc *desc = dev_get_drvdata(dev);
+       umode_t mode = attr->mode;
+       bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags);
+
+       if (attr == &dev_attr_direction.attr) {
+               if (!show_direction)
+                       mode = 0;
+       } else if (attr == &dev_attr_edge.attr) {
+               if (gpiod_to_irq(desc) < 0)
+                       mode = 0;
+               if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
+                       mode = 0;
+       }
+
+       return mode;
+}
+
+static struct attribute *gpio_attrs[] = {
+       &dev_attr_direction.attr,
+       &dev_attr_edge.attr,
        &dev_attr_value.attr,
        &dev_attr_active_low.attr,
        NULL,
 };
 
-static const struct attribute_group gpio_attr_group = {
-       .attrs = (struct attribute **) gpio_attrs,
+static const struct attribute_group gpio_group = {
+       .attrs = gpio_attrs,
+       .is_visible = gpio_is_visible,
+};
+
+static const struct attribute_group *gpio_groups[] = {
+       &gpio_group,
+       NULL
 };
 
 /*
@@ -400,16 +429,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
 }
 static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
 
-static const struct attribute *gpiochip_attrs[] = {
+static struct attribute *gpiochip_attrs[] = {
        &dev_attr_base.attr,
        &dev_attr_label.attr,
        &dev_attr_ngpio.attr,
        NULL,
 };
-
-static const struct attribute_group gpiochip_attr_group = {
-       .attrs = (struct attribute **) gpiochip_attrs,
-};
+ATTRIBUTE_GROUPS(gpiochip);
 
 /*
  * /sys/class/gpio/export ... write-only
@@ -556,45 +582,30 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
                goto fail_unlock;
        }
 
-       if (!desc->chip->direction_input || !desc->chip->direction_output)
-               direction_may_change = false;
+       if (desc->chip->direction_input && desc->chip->direction_output &&
+                       direction_may_change) {
+               set_bit(FLAG_SYSFS_DIR, &desc->flags);
+       }
+
        spin_unlock_irqrestore(&gpio_lock, flags);
 
        offset = gpio_chip_hwgpio(desc);
        if (desc->chip->names && desc->chip->names[offset])
                ioname = desc->chip->names[offset];
 
-       dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
-                           desc, ioname ? ioname : "gpio%u",
-                           desc_to_gpio(desc));
+       dev = device_create_with_groups(&gpio_class, desc->chip->dev,
+                                       MKDEV(0, 0), desc, gpio_groups,
+                                       ioname ? ioname : "gpio%u",
+                                       desc_to_gpio(desc));
        if (IS_ERR(dev)) {
                status = PTR_ERR(dev);
                goto fail_unlock;
        }
 
-       status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
-       if (status)
-               goto fail_unregister_device;
-
-       if (direction_may_change) {
-               status = device_create_file(dev, &dev_attr_direction);
-               if (status)
-                       goto fail_unregister_device;
-       }
-
-       if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
-                                      !test_bit(FLAG_IS_OUT, &desc->flags))) {
-               status = device_create_file(dev, &dev_attr_edge);
-               if (status)
-                       goto fail_unregister_device;
-       }
-
        set_bit(FLAG_EXPORT, &desc->flags);
        mutex_unlock(&sysfs_lock);
        return 0;
 
-fail_unregister_device:
-       device_unregister(dev);
 fail_unlock:
        mutex_unlock(&sysfs_lock);
        gpiod_dbg(desc, "%s: status %d\n", __func__, status);
@@ -637,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name,
                if (tdev != NULL) {
                        status = sysfs_create_link(&dev->kobj, &tdev->kobj,
                                                name);
+                       put_device(tdev);
                } else {
                        status = -ENODEV;
                }
@@ -684,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
        }
 
        status = sysfs_set_active_low(desc, dev, value);
-
+       put_device(dev);
 unlock:
        mutex_unlock(&sysfs_lock);
 
@@ -718,6 +730,7 @@ void gpiod_unexport(struct gpio_desc *desc)
                dev = class_find_device(&gpio_class, NULL, desc, match_export);
                if (dev) {
                        gpio_setup_irq(desc, dev, 0);
+                       clear_bit(FLAG_SYSFS_DIR, &desc->flags);
                        clear_bit(FLAG_EXPORT, &desc->flags);
                } else
                        status = -ENODEV;
@@ -750,13 +763,13 @@ int gpiochip_export(struct gpio_chip *chip)
 
        /* use chip->base for the ID; it's already known to be unique */
        mutex_lock(&sysfs_lock);
-       dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
-                               "gpiochip%d", chip->base);
-       if (!IS_ERR(dev)) {
-               status = sysfs_create_group(&dev->kobj,
-                               &gpiochip_attr_group);
-       } else
+       dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
+                                       chip, gpiochip_groups,
+                                       "gpiochip%d", chip->base);
+       if (IS_ERR(dev))
                status = PTR_ERR(dev);
+       else
+               status = 0;
        chip->exported = (status == 0);
        mutex_unlock(&sysfs_lock);
 
index 487afe6f22fcd6872b16995cffcb8179c374a276..568aa2b6bdb019e9285372d731d8a9e9bcbed1f7 100644 (file)
@@ -248,29 +248,30 @@ int gpiochip_add(struct gpio_chip *chip)
                base = gpiochip_find_base(chip->ngpio);
                if (base < 0) {
                        status = base;
-                       goto unlock;
+                       spin_unlock_irqrestore(&gpio_lock, flags);
+                       goto err_free_descs;
                }
                chip->base = base;
        }
 
        status = gpiochip_add_to_list(chip);
+       if (status) {
+               spin_unlock_irqrestore(&gpio_lock, flags);
+               goto err_free_descs;
+       }
 
-       if (status == 0) {
-               for (id = 0; id < chip->ngpio; id++) {
-                       struct gpio_desc *desc = &descs[id];
-                       desc->chip = chip;
-
-                       /* REVISIT:  most hardware initializes GPIOs as
-                        * inputs (often with pullups enabled) so power
-                        * usage is minimized.  Linux code should set the
-                        * gpio direction first thing; but until it does,
-                        * and in case chip->get_direction is not set,
-                        * we may expose the wrong direction in sysfs.
-                        */
-                       desc->flags = !chip->direction_input
-                               ? (1 << FLAG_IS_OUT)
-                               : 0;
-               }
+       for (id = 0; id < chip->ngpio; id++) {
+               struct gpio_desc *desc = &descs[id];
+
+               desc->chip = chip;
+
+               /* REVISIT: most hardware initializes GPIOs as inputs (often
+                * with pullups enabled) so power usage is minimized. Linux
+                * code should set the gpio direction first thing; but until
+                * it does, and in case chip->get_direction is not set, we may
+                * expose the wrong direction in sysfs.
+                */
+               desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
        }
 
        chip->desc = descs;
@@ -284,12 +285,9 @@ int gpiochip_add(struct gpio_chip *chip)
        of_gpiochip_add(chip);
        acpi_gpiochip_add(chip);
 
-       if (status)
-               goto fail;
-
        status = gpiochip_export(chip);
        if (status)
-               goto fail;
+               goto err_remove_chip;
 
        pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
                chip->base, chip->base + chip->ngpio - 1,
@@ -297,11 +295,15 @@ int gpiochip_add(struct gpio_chip *chip)
 
        return 0;
 
-unlock:
+err_remove_chip:
+       acpi_gpiochip_remove(chip);
+       of_gpiochip_remove(chip);
+       spin_lock_irqsave(&gpio_lock, flags);
+       list_del(&chip->list);
        spin_unlock_irqrestore(&gpio_lock, flags);
-fail:
-       kfree(descs);
        chip->desc = NULL;
+err_free_descs:
+       kfree(descs);
 
        /* failures here can mean systems won't boot... */
        pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
@@ -325,14 +327,15 @@ void gpiochip_remove(struct gpio_chip *chip)
        unsigned long   flags;
        unsigned        id;
 
-       acpi_gpiochip_remove(chip);
-
-       spin_lock_irqsave(&gpio_lock, flags);
+       gpiochip_unexport(chip);
 
        gpiochip_irqchip_remove(chip);
+
+       acpi_gpiochip_remove(chip);
        gpiochip_remove_pin_ranges(chip);
        of_gpiochip_remove(chip);
 
+       spin_lock_irqsave(&gpio_lock, flags);
        for (id = 0; id < chip->ngpio; id++) {
                if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags))
                        dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
@@ -342,7 +345,6 @@ void gpiochip_remove(struct gpio_chip *chip)
 
        list_del(&chip->list);
        spin_unlock_irqrestore(&gpio_lock, flags);
-       gpiochip_unexport(chip);
 
        kfree(chip->desc);
        chip->desc = NULL;
index e3a52113a5410531472522ca2e1781e73bd91306..550a5eafbd38ce6f1ed5a1d9a899649df75b2fa0 100644 (file)
@@ -77,6 +77,7 @@ struct gpio_desc {
 #define FLAG_OPEN_DRAIN        7       /* Gpio is open drain type */
 #define FLAG_OPEN_SOURCE 8     /* Gpio is open source type */
 #define FLAG_USED_AS_IRQ 9     /* GPIO is connected to an IRQ */
+#define FLAG_SYSFS_DIR 10      /* show sysfs direction attribute */
 
 #define ID_SHIFT       16      /* add new flags before this one */
 
index 66e40398b3d32220624cb7fad671c86058c84339..e620807418ea7559eddc9d5a994a0c5e1f829a9c 100644 (file)
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
 obj-$(CONFIG_DRM_TTM)  += ttm/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_MGA)  += mga/
 obj-$(CONFIG_DRM_I810) += i810/
@@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
 obj-y                  += i2c/
 obj-y                  += panel/
 obj-y                  += bridge/
-obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
index be6246de5091d0a589c25de25d147d8ed786af39..307a309110e60d5f473be86d26c24e3b0cb80a90 100644 (file)
@@ -8,7 +8,6 @@ amdkfd-y        := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
                kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
                kfd_process.o kfd_queue.o kfd_mqd_manager.o \
                kfd_kernel_queue.o kfd_packet_manager.o \
-               kfd_process_queue_manager.o kfd_device_queue_manager.o \
-               kfd_interrupt.o
+               kfd_process_queue_manager.o kfd_device_queue_manager.o
 
 obj-$(CONFIG_HSA_AMD)  += amdkfd.o
index 7d4974b83af7821649c7eff1f75ee4d0b05758e6..fcfdf23e1913ed01663b46bebfda5eec8bc4079d 100644 (file)
@@ -31,7 +31,6 @@
 #include <uapi/linux/kfd_ioctl.h>
 #include <linux/time.h>
 #include <linux/mm.h>
-#include <linux/uaccess.h>
 #include <uapi/asm-generic/mman-common.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
@@ -127,17 +126,14 @@ static int kfd_open(struct inode *inode, struct file *filep)
        return 0;
 }
 
-static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
-                                       void __user *arg)
+static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+                                       void *data)
 {
-       struct kfd_ioctl_get_version_args args;
+       struct kfd_ioctl_get_version_args *args = data;
        int err = 0;
 
-       args.major_version = KFD_IOCTL_MAJOR_VERSION;
-       args.minor_version = KFD_IOCTL_MINOR_VERSION;
-
-       if (copy_to_user(arg, &args, sizeof(args)))
-               err = -EFAULT;
+       args->major_version = KFD_IOCTL_MAJOR_VERSION;
+       args->minor_version = KFD_IOCTL_MINOR_VERSION;
 
        return err;
 }
@@ -221,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
        return 0;
 }
 
-static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
-                                       void __user *arg)
+static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+                                       void *data)
 {
-       struct kfd_ioctl_create_queue_args args;
+       struct kfd_ioctl_create_queue_args *args = data;
        struct kfd_dev *dev;
        int err = 0;
        unsigned int queue_id;
@@ -233,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
        memset(&q_properties, 0, sizeof(struct queue_properties));
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
        pr_debug("kfd: creating queue ioctl\n");
 
-       err = set_queue_properties_from_user(&q_properties, &args);
+       err = set_queue_properties_from_user(&q_properties, args);
        if (err)
                return err;
 
-       dev = kfd_device_by_id(args.gpu_id);
+       dev = kfd_device_by_id(args->gpu_id);
        if (dev == NULL)
                return -EINVAL;
 
@@ -250,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
-               err = PTR_ERR(pdd);
+               err = -ESRCH;
                goto err_bind_process;
        }
 
@@ -263,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
        if (err != 0)
                goto err_create_queue;
 
-       args.queue_id = queue_id;
+       args->queue_id = queue_id;
 
        /* Return gpu_id as doorbell offset for mmap usage */
-       args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
-
-       if (copy_to_user(arg, &args, sizeof(args))) {
-               err = -EFAULT;
-               goto err_copy_args_out;
-       }
+       args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
 
        mutex_unlock(&p->mutex);
 
-       pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+       pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
 
        pr_debug("ring buffer address == 0x%016llX\n",
-                       args.ring_base_address);
+                       args->ring_base_address);
 
        pr_debug("read ptr address    == 0x%016llX\n",
-                       args.read_pointer_address);
+                       args->read_pointer_address);
 
        pr_debug("write ptr address   == 0x%016llX\n",
-                       args.write_pointer_address);
+                       args->write_pointer_address);
 
        return 0;
 
-err_copy_args_out:
-       pqm_destroy_queue(&p->pqm, queue_id);
 err_create_queue:
 err_bind_process:
        mutex_unlock(&p->mutex);
@@ -297,99 +283,90 @@ err_bind_process:
 }
 
 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
-                                       void __user *arg)
+                                       void *data)
 {
        int retval;
-       struct kfd_ioctl_destroy_queue_args args;
-
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
+       struct kfd_ioctl_destroy_queue_args *args = data;
 
        pr_debug("kfd: destroying queue id %d for PASID %d\n",
-                               args.queue_id,
+                               args->queue_id,
                                p->pasid);
 
        mutex_lock(&p->mutex);
 
-       retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+       retval = pqm_destroy_queue(&p->pqm, args->queue_id);
 
        mutex_unlock(&p->mutex);
        return retval;
 }
 
 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
-                                       void __user *arg)
+                                       void *data)
 {
        int retval;
-       struct kfd_ioctl_update_queue_args args;
+       struct kfd_ioctl_update_queue_args *args = data;
        struct queue_properties properties;
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+       if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
                pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
                return -EINVAL;
        }
 
-       if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+       if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
                pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
                return -EINVAL;
        }
 
-       if ((args.ring_base_address) &&
+       if ((args->ring_base_address) &&
                (!access_ok(VERIFY_WRITE,
-                       (const void __user *) args.ring_base_address,
+                       (const void __user *) args->ring_base_address,
                        sizeof(uint64_t)))) {
                pr_err("kfd: can't access ring base address\n");
                return -EFAULT;
        }
 
-       if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+       if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
                pr_err("kfd: ring size must be a power of 2 or 0\n");
                return -EINVAL;
        }
 
-       properties.queue_address = args.ring_base_address;
-       properties.queue_size = args.ring_size;
-       properties.queue_percent = args.queue_percentage;
-       properties.priority = args.queue_priority;
+       properties.queue_address = args->ring_base_address;
+       properties.queue_size = args->ring_size;
+       properties.queue_percent = args->queue_percentage;
+       properties.priority = args->queue_priority;
 
        pr_debug("kfd: updating queue id %d for PASID %d\n",
-                       args.queue_id, p->pasid);
+                       args->queue_id, p->pasid);
 
        mutex_lock(&p->mutex);
 
-       retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+       retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
 
        mutex_unlock(&p->mutex);
 
        return retval;
 }
 
-static long kfd_ioctl_set_memory_policy(struct file *filep,
-                               struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_set_memory_policy(struct file *filep,
+                                       struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_set_memory_policy_args args;
+       struct kfd_ioctl_set_memory_policy_args *args = data;
        struct kfd_dev *dev;
        int err = 0;
        struct kfd_process_device *pdd;
        enum cache_policy default_policy, alternate_policy;
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
-           && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+       if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+           && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
                return -EINVAL;
        }
 
-       if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
-           && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+       if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+           && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
                return -EINVAL;
        }
 
-       dev = kfd_device_by_id(args.gpu_id);
+       dev = kfd_device_by_id(args->gpu_id);
        if (dev == NULL)
                return -EINVAL;
 
@@ -397,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
 
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
-               err = PTR_ERR(pdd);
+               err = -ESRCH;
                goto out;
        }
 
-       default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+       default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                         ? cache_policy_coherent : cache_policy_noncoherent;
 
        alternate_policy =
-               (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+               (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                   ? cache_policy_coherent : cache_policy_noncoherent;
 
        if (!dev->dqm->set_cache_memory_policy(dev->dqm,
                                &pdd->qpd,
                                default_policy,
                                alternate_policy,
-                               (void __user *)args.alternate_aperture_base,
-                               args.alternate_aperture_size))
+                               (void __user *)args->alternate_aperture_base,
+                               args->alternate_aperture_size))
                err = -EINVAL;
 
 out:
@@ -422,53 +399,44 @@ out:
        return err;
 }
 
-static long kfd_ioctl_get_clock_counters(struct file *filep,
-                               struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_get_clock_counters(struct file *filep,
+                               struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_get_clock_counters_args args;
+       struct kfd_ioctl_get_clock_counters_args *args = data;
        struct kfd_dev *dev;
        struct timespec time;
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       dev = kfd_device_by_id(args.gpu_id);
+       dev = kfd_device_by_id(args->gpu_id);
        if (dev == NULL)
                return -EINVAL;
 
        /* Reading GPU clock counter from KGD */
-       args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+       args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
 
        /* No access to rdtsc. Using raw monotonic time */
        getrawmonotonic(&time);
-       args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+       args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
 
        get_monotonic_boottime(&time);
-       args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+       args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
 
        /* Since the counter is in nano-seconds we use 1GHz frequency */
-       args.system_clock_freq = 1000000000;
-
-       if (copy_to_user(arg, &args, sizeof(args)))
-               return -EFAULT;
+       args->system_clock_freq = 1000000000;
 
        return 0;
 }
 
 
 static int kfd_ioctl_get_process_apertures(struct file *filp,
-                               struct kfd_process *p, void __user *arg)
+                               struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_get_process_apertures_args args;
+       struct kfd_ioctl_get_process_apertures_args *args = data;
        struct kfd_process_device_apertures *pAperture;
        struct kfd_process_device *pdd;
 
        dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       args.num_of_nodes = 0;
+       args->num_of_nodes = 0;
 
        mutex_lock(&p->mutex);
 
@@ -477,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                /* Run over all pdd of the process */
                pdd = kfd_get_first_process_device_data(p);
                do {
-                       pAperture = &args.process_apertures[args.num_of_nodes];
+                       pAperture =
+                               &args->process_apertures[args->num_of_nodes];
                        pAperture->gpu_id = pdd->dev->id;
                        pAperture->lds_base = pdd->lds_base;
                        pAperture->lds_limit = pdd->lds_limit;
@@ -487,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                        pAperture->scratch_limit = pdd->scratch_limit;
 
                        dev_dbg(kfd_device,
-                               "node id %u\n", args.num_of_nodes);
+                               "node id %u\n", args->num_of_nodes);
                        dev_dbg(kfd_device,
                                "gpu id %u\n", pdd->dev->id);
                        dev_dbg(kfd_device,
@@ -503,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                        dev_dbg(kfd_device,
                                "scratch_limit %llX\n", pdd->scratch_limit);
 
-                       args.num_of_nodes++;
+                       args->num_of_nodes++;
                } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
-                               (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+                               (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
        }
 
        mutex_unlock(&p->mutex);
 
-       if (copy_to_user(arg, &args, sizeof(args)))
-               return -EFAULT;
-
        return 0;
 }
 
+#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
+       [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
+                       kfd_ioctl_get_version, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
+                       kfd_ioctl_create_queue, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
+                       kfd_ioctl_destroy_queue, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
+                       kfd_ioctl_set_memory_policy, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
+                       kfd_ioctl_get_clock_counters, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
+                       kfd_ioctl_get_process_apertures, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
+                       kfd_ioctl_update_queue, 0),
+};
+
+#define AMDKFD_CORE_IOCTL_COUNT        ARRAY_SIZE(amdkfd_ioctls)
+
 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 {
        struct kfd_process *process;
-       long err = -EINVAL;
+       amdkfd_ioctl_t *func;
+       const struct amdkfd_ioctl_desc *ioctl = NULL;
+       unsigned int nr = _IOC_NR(cmd);
+       char stack_kdata[128];
+       char *kdata = NULL;
+       unsigned int usize, asize;
+       int retcode = -EINVAL;
 
-       dev_dbg(kfd_device,
-               "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
-               cmd, _IOC_NR(cmd), arg);
+       if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+               goto err_i1;
+
+       if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
+               u32 amdkfd_size;
+
+               ioctl = &amdkfd_ioctls[nr];
+
+               amdkfd_size = _IOC_SIZE(ioctl->cmd);
+               usize = asize = _IOC_SIZE(cmd);
+               if (amdkfd_size > asize)
+                       asize = amdkfd_size;
+
+               cmd = ioctl->cmd;
+       } else
+               goto err_i1;
+
+       dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
 
        process = kfd_get_process(current);
-       if (IS_ERR(process))
-               return PTR_ERR(process);
+       if (IS_ERR(process)) {
+               dev_dbg(kfd_device, "no process\n");
+               goto err_i1;
+       }
 
-       switch (cmd) {
-       case KFD_IOC_GET_VERSION:
-               err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
-               break;
-       case KFD_IOC_CREATE_QUEUE:
-               err = kfd_ioctl_create_queue(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_DESTROY_QUEUE:
-               err = kfd_ioctl_destroy_queue(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_SET_MEMORY_POLICY:
-               err = kfd_ioctl_set_memory_policy(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_GET_CLOCK_COUNTERS:
-               err = kfd_ioctl_get_clock_counters(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_GET_PROCESS_APERTURES:
-               err = kfd_ioctl_get_process_apertures(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_UPDATE_QUEUE:
-               err = kfd_ioctl_update_queue(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       default:
-               dev_err(kfd_device,
-                       "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
-                       cmd, arg);
-               err = -EINVAL;
-               break;
+       /* Do not trust userspace, use our own definition */
+       func = ioctl->func;
+
+       if (unlikely(!func)) {
+               dev_dbg(kfd_device, "no function\n");
+               retcode = -EINVAL;
+               goto err_i1;
        }
 
-       if (err < 0)
-               dev_err(kfd_device,
-                       "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
-                       err, cmd, _IOC_NR(cmd));
+       if (cmd & (IOC_IN | IOC_OUT)) {
+               if (asize <= sizeof(stack_kdata)) {
+                       kdata = stack_kdata;
+               } else {
+                       kdata = kmalloc(asize, GFP_KERNEL);
+                       if (!kdata) {
+                               retcode = -ENOMEM;
+                               goto err_i1;
+                       }
+               }
+               if (asize > usize)
+                       memset(kdata + usize, 0, asize - usize);
+       }
 
-       return err;
+       if (cmd & IOC_IN) {
+               if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
+                       retcode = -EFAULT;
+                       goto err_i1;
+               }
+       } else if (cmd & IOC_OUT) {
+               memset(kdata, 0, usize);
+       }
+
+       retcode = func(filep, process, kdata);
+
+       if (cmd & IOC_OUT)
+               if (copy_to_user((void __user *)arg, kdata, usize) != 0)
+                       retcode = -EFAULT;
+
+err_i1:
+       if (!ioctl)
+               dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+                         task_pid_nr(current), cmd, nr);
+
+       if (kdata != stack_kdata)
+               kfree(kdata);
+
+       if (retcode)
+               dev_dbg(kfd_device, "ret = %d\n", retcode);
+
+       return retcode;
 }
 
 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
index 43884ebd4303fedd47b56431737a0f726852c552..25bc47f3c1cf53d0181d0bc63f3c381f0502f138 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
 
 #define MQD_SIZE_ALIGNED 768
 
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->shared_resources = *gpu_resources;
 
        /* calculate max size of mqds needed for queues */
-       size = max_num_of_processes *
-               max_num_of_queues_per_process *
-               kfd->device_info->mqd_size_aligned;
+       size = max_num_of_queues_per_device *
+                       kfd->device_info->mqd_size_aligned;
 
        /* add another 512KB for all other allocations on gart */
        size += 512 * 1024;
@@ -192,13 +192,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                goto kfd_topology_add_device_error;
        }
 
-       if (kfd_interrupt_init(kfd)) {
-               dev_err(kfd_device,
-                       "Error initializing interrupts for device (%x:%x)\n",
-                       kfd->pdev->vendor, kfd->pdev->device);
-               goto kfd_interrupt_error;
-       }
-
        if (!device_iommu_pasid_init(kfd)) {
                dev_err(kfd_device,
                        "Error initializing iommuv2 for device (%x:%x)\n",
@@ -237,8 +230,6 @@ dqm_start_error:
 device_queue_manager_error:
        amd_iommu_free_device(kfd->pdev);
 device_iommu_pasid_error:
-       kfd_interrupt_exit(kfd);
-kfd_interrupt_error:
        kfd_topology_remove_device(kfd);
 kfd_topology_add_device_error:
        kfd2kgd->fini_sa_manager(kfd->kgd);
@@ -254,7 +245,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
        if (kfd->init_complete) {
                device_queue_manager_uninit(kfd->dqm);
                amd_iommu_free_device(kfd->pdev);
-               kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
        }
 
@@ -296,13 +286,5 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
 /* This is called directly from KGD at ISR. */
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
-       if (kfd->init_complete) {
-               spin_lock(&kfd->interrupt_lock);
-
-               if (kfd->interrupts_active
-                   && enqueue_ih_ring_entry(kfd, ih_ring_entry))
-                       schedule_work(&kfd->interrupt_work);
-
-               spin_unlock(&kfd->interrupt_lock);
-       }
+       /* Process interrupts / schedule work as necessary */
 }
index 924e90c072e513180ec8991b50333f2af663a3f5..0fd592799d58dc6fdbd6c37bb5e826ed0f89cf9a 100644 (file)
@@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
 {
        int bit = qpd->vmid - KFD_VMID_START_OFFSET;
 
+       /* Release the vmid mapping */
+       set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+
        set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
        qpd->vmid = 0;
        q->properties.vmid = 0;
@@ -180,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
        mutex_lock(&dqm->lock);
 
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               mutex_unlock(&dqm->lock);
+               return -EPERM;
+       }
+
        if (list_empty(&qpd->queues_list)) {
                retval = allocate_vmid(dqm, qpd, q);
                if (retval != 0) {
@@ -204,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
        list_add(&q->list, &qpd->queues_list);
        dqm->queue_count++;
 
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        mutex_unlock(&dqm->lock);
        return 0;
 }
@@ -272,6 +290,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                return retval;
        }
 
+       pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
+                       q->pipe,
+                       q->queue);
+
+       retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+                       q->queue, (uint32_t __user *) q->properties.write_ptr);
+       if (retval != 0) {
+               deallocate_hqd(dqm, q);
+               mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+               return retval;
+       }
+
        return 0;
 }
 
@@ -311,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
        if (list_empty(&qpd->queues_list))
                deallocate_vmid(dqm, qpd, q);
        dqm->queue_count--;
+
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
 out:
        mutex_unlock(&dqm->lock);
        return retval;
@@ -320,6 +359,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 {
        int retval;
        struct mqd_manager *mqd;
+       bool prev_active = false;
 
        BUG_ON(!dqm || !q || !q->mqd);
 
@@ -330,10 +370,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                return -ENOMEM;
        }
 
-       retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
        if (q->properties.is_active == true)
+               prev_active = true;
+
+       /*
+        *
+        * check active state vs. the previous state
+        * and modify counter accordingly
+        */
+       retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+       if ((q->properties.is_active == true) && (prev_active == false))
                dqm->queue_count++;
-       else
+       else if ((q->properties.is_active == false) && (prev_active == true))
                dqm->queue_count--;
 
        if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
@@ -517,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
 
        for (i = 0; i < pipes_num; i++) {
                inx = i + first_pipe;
+               /*
+                * HPD buffer on GTT is allocated by amdkfd, no need to waste
+                * space in GTT for pipelines we don't initialize
+                */
                pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
                pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
                /* = log2(bytes/4)-1 */
-               kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+               kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
                                CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
        }
 
@@ -536,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
 
        pr_debug("kfd: In %s\n", __func__);
 
-       retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+       retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
        if (retval != 0)
                return retval;
 
@@ -728,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
        pr_debug("kfd: In func %s\n", __func__);
 
        mutex_lock(&dqm->lock);
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               mutex_unlock(&dqm->lock);
+               return -EPERM;
+       }
+
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        list_add(&kq->list, &qpd->priv_queue_list);
        dqm->queue_count++;
        qpd->is_debug = true;
@@ -751,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
        dqm->queue_count--;
        qpd->is_debug = false;
        execute_queues_cpsch(dqm, false);
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type.
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
        mutex_unlock(&dqm->lock);
 }
 
@@ -769,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
        mutex_lock(&dqm->lock);
 
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               retval = -EPERM;
+               goto out;
+       }
+
        mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
        if (mqd == NULL) {
                mutex_unlock(&dqm->lock);
@@ -786,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                retval = execute_queues_cpsch(dqm, false);
        }
 
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
 out:
        mutex_unlock(&dqm->lock);
        return retval;
@@ -906,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
        mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        mutex_unlock(&dqm->lock);
 
        return 0;
index c3f189e8ae35da5527efebe3ae016b47b2b3342c..52035bf0c1cb3ce896901573491a071c17c8bcea 100644 (file)
@@ -130,6 +130,7 @@ struct device_queue_manager {
        struct list_head        queues;
        unsigned int            processes_count;
        unsigned int            queue_count;
+       unsigned int            total_queue_count;
        unsigned int            next_pipe_to_allocate;
        unsigned int            *allocated_queues;
        unsigned int            vmid_bitmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
deleted file mode 100644 (file)
index 5b99909..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * KFD Interrupts.
- *
- * AMD GPUs deliver interrupts by pushing an interrupt description onto the
- * interrupt ring and then sending an interrupt. KGD receives the interrupt
- * in ISR and sends us a pointer to each new entry on the interrupt ring.
- *
- * We generally can't process interrupt-signaled events from ISR, so we call
- * out to each interrupt client module (currently only the scheduler) to ask if
- * each interrupt is interesting. If they return true, then it requires further
- * processing so we copy it to an internal interrupt ring and call each
- * interrupt client again from a work-queue.
- *
- * There's no acknowledgment for the interrupts we use. The hardware simply
- * queues a new interrupt each time without waiting.
- *
- * The fixed-size internal queue means that it's possible for us to lose
- * interrupts because we have no back-pressure to the hardware.
- */
-
-#include <linux/slab.h>
-#include <linux/device.h>
-#include "kfd_priv.h"
-
-#define KFD_INTERRUPT_RING_SIZE 256
-
-static void interrupt_wq(struct work_struct *);
-
-int kfd_interrupt_init(struct kfd_dev *kfd)
-{
-       void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
-                                       kfd->device_info->ih_ring_entry_size,
-                                       GFP_KERNEL);
-       if (!interrupt_ring)
-               return -ENOMEM;
-
-       kfd->interrupt_ring = interrupt_ring;
-       kfd->interrupt_ring_size =
-               KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
-       atomic_set(&kfd->interrupt_ring_wptr, 0);
-       atomic_set(&kfd->interrupt_ring_rptr, 0);
-
-       spin_lock_init(&kfd->interrupt_lock);
-
-       INIT_WORK(&kfd->interrupt_work, interrupt_wq);
-
-       kfd->interrupts_active = true;
-
-       /*
-        * After this function returns, the interrupt will be enabled. This
-        * barrier ensures that the interrupt running on a different processor
-        * sees all the above writes.
-        */
-       smp_wmb();
-
-       return 0;
-}
-
-void kfd_interrupt_exit(struct kfd_dev *kfd)
-{
-       /*
-        * Stop the interrupt handler from writing to the ring and scheduling
-        * workqueue items. The spinlock ensures that any interrupt running
-        * after we have unlocked sees interrupts_active = false.
-        */
-       unsigned long flags;
-
-       spin_lock_irqsave(&kfd->interrupt_lock, flags);
-       kfd->interrupts_active = false;
-       spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
-
-       /*
-        * Flush_scheduled_work ensures that there are no outstanding
-        * work-queue items that will access interrupt_ring. New work items
-        * can't be created because we stopped interrupt handling above.
-        */
-       flush_scheduled_work();
-
-       kfree(kfd->interrupt_ring);
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with dequeue_ih_ring_entry.
- */
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd,        const void *ih_ring_entry)
-{
-       unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
-       unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
-
-       if ((rptr - wptr) % kfd->interrupt_ring_size ==
-                                       kfd->device_info->ih_ring_entry_size) {
-               /* This is very bad, the system is likely to hang. */
-               dev_err_ratelimited(kfd_chardev(),
-                       "Interrupt ring overflow, dropping interrupt.\n");
-               return false;
-       }
-
-       memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
-                       kfd->device_info->ih_ring_entry_size);
-
-       wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
-                       kfd->interrupt_ring_size;
-       smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
-       atomic_set(&kfd->interrupt_ring_wptr, wptr);
-
-       return true;
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with enqueue_ih_ring_entry.
- */
-static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
-{
-       /*
-        * Assume that wait queues have an implicit barrier, i.e. anything that
-        * happened in the ISR before it queued work is visible.
-        */
-
-       unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
-       unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
-
-       if (rptr == wptr)
-               return false;
-
-       memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
-                       kfd->device_info->ih_ring_entry_size);
-
-       rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
-                       kfd->interrupt_ring_size;
-
-       /*
-        * Ensure the rptr write update is not visible until
-        * memcpy has finished reading.
-        */
-       smp_mb();
-       atomic_set(&kfd->interrupt_ring_rptr, rptr);
-
-       return true;
-}
-
-static void interrupt_wq(struct work_struct *work)
-{
-       struct kfd_dev *dev = container_of(work, struct kfd_dev,
-                                               interrupt_work);
-
-       uint32_t ih_ring_entry[DIV_ROUND_UP(
-                               dev->device_info->ih_ring_entry_size,
-                               sizeof(uint32_t))];
-
-       while (dequeue_ih_ring_entry(dev, ih_ring_entry))
-               ;
-}
index 95d5af138e6e7f2bcbd8d76351f7e12827031189..1c385c23dd0b8e2ad4155972d53bc9fe8195d03a 100644 (file)
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
 MODULE_PARM_DESC(sched_policy,
        "Kernel cmdline parameter that defines the amdkfd scheduling policy");
 
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
-       "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
-       "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+       "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
 
 bool kgd2kfd_init(unsigned interface_version,
                  const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
        }
 
        /* Verify module parameters */
-       if ((max_num_of_processes < 0) ||
-               (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
-               pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
-               return -1;
-       }
-
-       if ((max_num_of_queues_per_process < 0) ||
-               (max_num_of_queues_per_process >
-                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
-               pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+       if ((max_num_of_queues_per_device < 1) ||
+               (max_num_of_queues_per_device >
+                       KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+               pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
                return -1;
        }
 
index adc31474e786195bb3308150334c5a7a280f1d81..4c3828cf45bf71fbbae7e26f21f22bcd927fadce 100644 (file)
@@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
                        uint32_t queue_id)
 {
 
-       return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+       return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
                                        pipe_id, queue_id);
 
 }
index 71699ad97d74487d532cef2a168f8bb4d4fb5366..6cfe7f1f18cff0d805a75097a5a9f86e2a4fcdfd 100644 (file)
@@ -30,9 +30,9 @@ static DEFINE_MUTEX(pasid_mutex);
 
 int kfd_pasid_init(void)
 {
-       pasid_limit = max_num_of_processes;
+       pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
 
-       pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
+       pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
        if (!pasid_bitmap)
                return -ENOMEM;
 
index f9fb81e3bb09b24edd7f8de6c22cf7e64a92391b..96dc10e8904afc3cd2e5a8069ca8fb0a1eda2e6b 100644 (file)
 #define kfd_alloc_struct(ptr_to_struct)        \
        ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
 
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
 #define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
 
 /*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
  */
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
 
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE               \
+       (KFD_MAX_NUM_OF_PROCESSES *                     \
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 
 #define KFD_KERNEL_QUEUE_SIZE 2048
 
@@ -135,22 +134,10 @@ struct kfd_dev {
 
        struct kgd2kfd_shared_resources shared_resources;
 
-       void *interrupt_ring;
-       size_t interrupt_ring_size;
-       atomic_t interrupt_ring_rptr;
-       atomic_t interrupt_ring_wptr;
-       struct work_struct interrupt_work;
-       spinlock_t interrupt_lock;
-
        /* QCM Device instance */
        struct device_queue_manager *dqm;
 
        bool init_complete;
-       /*
-        * Interrupts of interest to KFD are copied
-        * from the HW ring into a SW ring.
-        */
-       bool interrupts_active;
 };
 
 /* KGD2KFD callbacks */
@@ -463,6 +450,24 @@ struct kfd_process {
        bool is_32bit_user_mode;
 };
 
+/**
+ * Ioctl function type.
+ *
+ * \param filep pointer to file structure.
+ * \param p amdkfd process pointer.
+ * \param data pointer to arg that was copied from user.
+ */
+typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
+                               void *data);
+
+struct amdkfd_ioctl_desc {
+       unsigned int cmd;
+       int flags;
+       amdkfd_ioctl_t *func;
+       unsigned int cmd_drv;
+       const char *name;
+};
+
 void kfd_process_create_wq(void);
 void kfd_process_destroy_wq(void);
 struct kfd_process *kfd_create_process(const struct task_struct *);
@@ -513,10 +518,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
 struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
 
 /* Interrupts */
-int kfd_interrupt_init(struct kfd_dev *dev);
-void kfd_interrupt_exit(struct kfd_dev *dev);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd,        const void *ih_ring_entry);
 
 /* Power Management */
 void kgd2kfd_suspend(struct kfd_dev *kfd);
index 47526780d736ced5470bfb2822a3892f879b0320..2fda1927bff794e7ff626169277d28aff8d1acba 100644 (file)
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
        pr_debug("kfd: in %s\n", __func__);
 
        found = find_first_zero_bit(pqm->queue_slot_bitmap,
-                       max_num_of_queues_per_process);
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 
        pr_debug("kfd: the new slot id %lu\n", found);
 
-       if (found >= max_num_of_queues_per_process) {
+       if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
                pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
                                pqm->process->pasid);
                return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 
        INIT_LIST_HEAD(&pqm->queues);
        pqm->queue_slot_bitmap =
-                       kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+                       kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                        BITS_PER_BYTE), GFP_KERNEL);
        if (pqm->queue_slot_bitmap == NULL)
                return -ENOMEM;
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                pqn->kq = NULL;
                retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
                                                &q->properties.vmid);
+               pr_debug("DQM returned %d for create_queue\n", retval);
                print_queue(q);
                break;
        case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
        }
 
        if (retval != 0) {
-               pr_err("kfd: error dqm create queue\n");
+               pr_debug("Error dqm create queue\n");
                goto err_create_queue;
        }
 
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 err_create_queue:
        kfree(pqn);
 err_allocate_pqn:
+       /* check if queues list is empty unregister process from device */
        clear_bit(*qid, pqm->queue_slot_bitmap);
+       if (list_empty(&pqm->queues))
+               dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
        return retval;
 }
 
@@ -311,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
        BUG_ON(!pqm);
 
        pqn = get_queue_by_qid(pqm, qid);
-       BUG_ON(!pqn);
+       if (!pqn) {
+               pr_debug("amdkfd: No queue %d exists for update operation\n",
+                               qid);
+               return -EFAULT;
+       }
 
        pqn->q->properties.queue_address = p->queue_address;
        pqn->q->properties.queue_size = p->queue_size;
index b11792d7e70e2a6fca1fdc15636c4672fb3165dc..cca1708fd811be8253ab0d2989d74e405f3dab04 100644 (file)
@@ -921,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
        uint32_t i = 0;
 
        list_for_each_entry(dev, &topology_device_list, list) {
-               ret = kfd_build_sysfs_node_entry(dev, 0);
+               ret = kfd_build_sysfs_node_entry(dev, i);
                if (ret < 0)
                        return ret;
                i++;
index 47b551970a14aed723b24f7d2662293fa8914dff..96a512208fade6825da6aa8e2645f2a088d485d2 100644 (file)
@@ -183,7 +183,7 @@ struct kfd2kgd_calls {
        int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr);
 
-       bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+       bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id);
 
        int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
index c2a1cba1e984546d63f033a4cf4b07ff3279bb16..b9140032962d943e658a9bc18af8be8418bc479f 100644 (file)
 #include "cirrus_drv.h"
 
 int cirrus_modeset = -1;
+int cirrus_bpp = 24;
 
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, cirrus_modeset, int, 0400);
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+module_param_named(bpp, cirrus_bpp, int, 0400);
 
 /*
  * This is the generic driver code. This binds the driver to the drm core,
index 693a4565c4ffb2a0629d48ec206ba3bb4accf830..705061537a27694c3834374a22bb297510a041be 100644 (file)
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
 
 int cirrus_bo_push_sysram(struct cirrus_bo *bo);
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+
+extern int cirrus_bpp;
+
 #endif                         /* __CIRRUS_DRV_H__ */
index 4c2d68e9102d6304b8fd5cc655266300706f32da..e4b976658087100304cd76f66330c16b9dcb7271 100644 (file)
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
        const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
        const int max_size = cdev->mc.vram_size;
 
+       if (bpp > cirrus_bpp)
+               return false;
        if (bpp > 32)
                return false;
 
index 99d4a74ffeaffd2582ca78353ae2156a90c796f5..61385f2298bf752eb87b1645582d62d47719f3be 100644 (file)
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
        int count;
 
        /* Just add a static list of modes */
-       count = drm_add_modes_noedid(connector, 1280, 1024);
-       drm_set_preferred_mode(connector, 1024, 768);
+       if (cirrus_bpp <= 24) {
+               count = drm_add_modes_noedid(connector, 1280, 1024);
+               drm_set_preferred_mode(connector, 1024, 768);
+       } else {
+               count = drm_add_modes_noedid(connector, 800, 600);
+               drm_set_preferred_mode(connector, 800, 600);
+       }
        return count;
 }
 
index 52ce26d6b4fb8aeda59bb6358f62a659bc0464b7..dc386ebe5193891e6780c7eba47951f3f3fd3e42 100644 (file)
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 }
 EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
+static void remove_from_modeset(struct drm_mode_set *set,
+               struct drm_connector *connector)
+{
+       int i, j;
+
+       for (i = 0; i < set->num_connectors; i++) {
+               if (set->connectors[i] == connector)
+                       break;
+       }
+
+       if (i == set->num_connectors)
+               return;
+
+       for (j = i + 1; j < set->num_connectors; j++) {
+               set->connectors[j - 1] = set->connectors[j];
+       }
+       set->num_connectors--;
+
+       /* because i915 is pissy about this..
+        * TODO maybe need to makes sure we set it back to !=NULL somewhere?
+        */
+       if (set->num_connectors == 0)
+               set->fb = NULL;
+}
+
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
                                       struct drm_connector *connector)
 {
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
        }
        fb_helper->connector_count--;
        kfree(fb_helper_connector);
+
+       /* also cleanup dangling references to the connector: */
+       for (i = 0; i < fb_helper->crtc_count; i++)
+               remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
        return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
@@ -741,7 +771,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
        int i, j, rc = 0;
        int start;
 
-       drm_modeset_lock_all(dev);
+       if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+               return -EBUSY;
+       }
        if (!drm_fb_helper_is_bound(fb_helper)) {
                drm_modeset_unlock_all(dev);
                return -EBUSY;
@@ -915,7 +947,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
        int ret = 0;
        int i;
 
-       drm_modeset_lock_all(dev);
+       if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+               return -EBUSY;
+       }
        if (!drm_fb_helper_is_bound(fb_helper)) {
                drm_modeset_unlock_all(dev);
                return -EBUSY;
index 121470a83d1a497a61a19fe5cd09f769140efac9..1bcbe07cecfc9541a2eba9415c227ae17a9c0449 100644 (file)
@@ -645,18 +645,6 @@ static int exynos_drm_init(void)
        if (!is_exynos)
                return -ENODEV;
 
-       /*
-        * Register device object only in case of Exynos SoC.
-        *
-        * Below codes resolves temporarily infinite loop issue incurred
-        * by Exynos drm driver when using multi-platform kernel.
-        * So these codes will be replaced with more generic way later.
-        */
-       if (!of_machine_is_compatible("samsung,exynos3") &&
-                       !of_machine_is_compatible("samsung,exynos4") &&
-                       !of_machine_is_compatible("samsung,exynos5"))
-               return -ENODEV;
-
        exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
                                                                NULL, 0);
        if (IS_ERR(exynos_drm_pdev))
index 5765a161abdd4b35c958086ad9fdf996b146dbc1..98051e8e855a1f07e8d5ef1e21995eb9783c1a1b 100644 (file)
@@ -1669,7 +1669,6 @@ static void hdmi_mode_apply(struct hdmi_context *hdata)
 
 static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 {
-       u8 buffer[2];
        u32 reg;
 
        clk_disable_unprepare(hdata->res.sclk_hdmi);
@@ -1677,11 +1676,8 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
        clk_prepare_enable(hdata->res.sclk_hdmi);
 
        /* operation mode */
-       buffer[0] = 0x1f;
-       buffer[1] = 0x00;
-
-       if (hdata->hdmiphy_port)
-               i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+       hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+                               HDMI_PHY_ENABLE_MODE_SET);
 
        if (hdata->type == HDMI_TYPE13)
                reg = HDMI_V13_PHY_RSTOUT;
index 820b76234ef4c5c6da02ebe50327066c5fe38d1a..064ed6597defefad5a2efa3bba63ba55f5551c13 100644 (file)
@@ -1026,6 +1026,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
 static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
 {
        struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+       int err;
 
        mutex_lock(&mixer_ctx->mixer_mutex);
        if (!mixer_ctx->powered) {
@@ -1034,7 +1035,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
        }
        mutex_unlock(&mixer_ctx->mixer_mutex);
 
-       drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+       err = drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+       if (err < 0) {
+               DRM_DEBUG_KMS("failed to acquire vblank counter\n");
+               return;
+       }
 
        atomic_set(&mixer_ctx->wait_vsync_event, 1);
 
@@ -1262,8 +1267,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
                return ret;
        }
 
-       pm_runtime_enable(dev);
-
        return 0;
 }
 
@@ -1272,8 +1275,6 @@ static void mixer_unbind(struct device *dev, struct device *master, void *data)
        struct mixer_context *ctx = dev_get_drvdata(dev);
 
        mixer_mgr_remove(&ctx->manager);
-
-       pm_runtime_disable(dev);
 }
 
 static const struct component_ops mixer_component_ops = {
index d4762799351d9d5e67efbce0987e10c0e974939c..a9041d1a8ff002f332705db5d80082761788ad19 100644 (file)
@@ -32,6 +32,8 @@
 struct tda998x_priv {
        struct i2c_client *cec;
        struct i2c_client *hdmi;
+       struct mutex mutex;
+       struct delayed_work dwork;
        uint16_t rev;
        uint8_t current_page;
        int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
        uint8_t addr = REG2ADDR(reg);
        int ret;
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return ret;
+               goto out;
 
        ret = i2c_master_send(client, &addr, sizeof(addr));
        if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
        if (ret < 0)
                goto fail;
 
-       return ret;
+       goto out;
 
 fail:
        dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
        return ret;
 }
 
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
        buf[0] = REG2ADDR(reg);
        memcpy(&buf[1], p, cnt);
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;
 
        ret = i2c_master_send(client, buf, cnt + 1);
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
        uint8_t buf[] = {REG2ADDR(reg), val};
        int ret;
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;
 
        ret = i2c_master_send(client, buf, sizeof(buf));
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
        uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
        int ret;
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;
 
        ret = i2c_master_send(client, buf, sizeof(buf));
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
        reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
 }
 
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct tda998x_priv *priv =
+                       container_of(dwork, struct tda998x_priv, dwork);
+
+       if (priv->encoder && priv->encoder->dev)
+               drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
 /*
  * only 2 interrupts may occur: screen plug/unplug and EDID read
  */
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
                priv->wq_edid_wait = 0;
                wake_up(&priv->wq_edid);
        } else if (cec != 0) {                  /* HPD change */
-               if (priv->encoder && priv->encoder->dev)
-                       drm_helper_hpd_irq_event(priv->encoder->dev);
+               schedule_delayed_work(&priv->dwork, HZ/10);
        }
        return IRQ_HANDLED;
 }
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
        /* disable all IRQs and free the IRQ handler */
        cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
        reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-       if (priv->hdmi->irq)
+       if (priv->hdmi->irq) {
                free_irq(priv->hdmi->irq, priv);
+               cancel_delayed_work_sync(&priv->dwork);
+       }
 
        i2c_unregister_device(priv->cec);
 }
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        struct device_node *np = client->dev.of_node;
        u32 video;
        int rev_lo, rev_hi, ret;
+       unsigned short cec_addr;
 
        priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
        priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
        priv->current_page = 0xff;
        priv->hdmi = client;
-       priv->cec = i2c_new_dummy(client->adapter, 0x34);
+       /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+       cec_addr = 0x34 + (client->addr & 0x03);
+       priv->cec = i2c_new_dummy(client->adapter, cec_addr);
        if (!priv->cec)
                return -ENODEV;
 
        priv->dpms = DRM_MODE_DPMS_OFF;
 
+       mutex_init(&priv->mutex);       /* protect the page access */
+
        /* wake up the device: */
        cec_write(priv, REG_CEC_ENAMODS,
                        CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        if (client->irq) {
                int irqf_trigger;
 
-               /* init read EDID waitqueue */
+               /* init read EDID waitqueue and HDP work */
                init_waitqueue_head(&priv->wq_edid);
+               INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
 
                /* clear pending interrupts */
                reg_read(priv, REG_INT_FLAGS_0);
index 574057cd1d0986b6bc96819f67c72ba3f5280891..7643300828c3aef79d32260af5f2c8e4c7ff5b2c 100644 (file)
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-                               WARN_ON(!IS_HASWELL(dev));
-                               WARN_ON(IS_HSW_ULT(dev));
-                       } else if (IS_BROADWELL(dev)) {
-                               dev_priv->pch_type = PCH_LPT;
-                               dev_priv->pch_id =
-                                       INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
-                               DRM_DEBUG_KMS("This is Broadwell, assuming "
-                                             "LynxPoint LP PCH\n");
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-                               WARN_ON(!IS_HASWELL(dev));
-                               WARN_ON(!IS_HSW_ULT(dev));
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
index 70d0f0f06f1a65ccec340c0882f0481bf66903a5..9d7a7155bf02a6f9fb44d504e69e635fefcc9c2e 100644 (file)
@@ -1756,8 +1756,6 @@ struct drm_i915_private {
         */
        struct workqueue_struct *dp_wq;
 
-       uint32_t bios_vgacntr;
-
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
                int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -2161,8 +2159,7 @@ struct drm_i915_cmd_table {
 #define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)                (IS_BROADWELL(dev) && \
-                                ((INTEL_DEVID(dev) & 0xf) == 0x2  || \
-                                (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+                                ((INTEL_DEVID(dev) & 0xf) == 0x6 ||    \
                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)                (IS_BROADWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
index 52adcb680be3a61113630493c6c5b98509965912..5f614828d365555f70005aff470117ab15b3ae12 100644 (file)
@@ -1048,6 +1048,7 @@ int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -1067,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                        return -EFAULT;
        }
 
+       intel_runtime_pm_get(dev_priv);
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
-               return ret;
+               goto put_rpm;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
@@ -1121,6 +1124,9 @@ out:
        drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
+put_rpm:
+       intel_runtime_pm_put(dev_priv);
+
        return ret;
 }
 
@@ -3142,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
                u32 size = i915_gem_obj_ggtt_size(obj);
                uint64_t val;
 
+               /* Adjust fence size to match tiled area */
+               if (obj->tiling_mode != I915_TILING_NONE) {
+                       uint32_t row_size = obj->stride *
+                               (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+                       size = (size / row_size) * row_size;
+               }
+
                val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
                                 0xfffff000) << 32;
                val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@@ -4878,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
        for (i = 0; i < NUM_L3_SLICES(dev); i++)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
-       /*
-        * XXX: Contexts should only be initialized once. Doing a switch to the
-        * default context switch however is something we'd like to do after
-        * reset or thaw (the latter may not actually be necessary for HW, but
-        * goes with our code better). Context switching requires rings (for
-        * the do_switch), but before enabling PPGTT. So don't move this.
-        */
-       ret = i915_gem_context_enable(dev_priv);
+       ret = i915_ppgtt_init_hw(dev);
        if (ret && ret != -EIO) {
-               DRM_ERROR("Context enable failed %d\n", ret);
+               DRM_ERROR("PPGTT enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
-
-               return ret;
        }
 
-       ret = i915_ppgtt_init_hw(dev);
+       ret = i915_gem_context_enable(dev_priv);
        if (ret && ret != -EIO) {
-               DRM_ERROR("PPGTT enable failed %d\n", ret);
+               DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
+
+               return ret;
        }
 
        return ret;
@@ -5149,7 +5155,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
        if (!mutex_is_locked(mutex))
                return false;
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
        return mutex->owner == task;
 #else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
index 996c2931c49945d86a595c6f38104c475fc32f1b..b051a238baf9338cb3e47a57ae31d6f1b540d006 100644 (file)
@@ -292,6 +292,23 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
+{
+       /*
+        * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
+        * if GEN6_PM_UP_EI_EXPIRED is masked.
+        *
+        * TODO: verify if this can be reproduced on VLV,CHV.
+        */
+       if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+               mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
+
+       if (INTEL_INFO(dev_priv)->gen >= 8)
+               mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
+       return mask;
+}
+
 void gen6_disable_rps_interrupts(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -304,8 +321,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
 
        spin_lock_irq(&dev_priv->irq_lock);
 
-       I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
-                  ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
+       I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 
        __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
@@ -3725,8 +3741,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
        if ((iir & flip_pending) == 0)
                goto check_page_flip;
 
-       intel_prepare_page_flip(dev, plane);
-
        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the Pendingflip
         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3736,6 +3750,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
        if (I915_READ16(ISR) & flip_pending)
                goto check_page_flip;
 
+       intel_prepare_page_flip(dev, plane);
        intel_finish_page_flip(dev, pipe);
        return true;
 
@@ -3907,8 +3922,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
        if ((iir & flip_pending) == 0)
                goto check_page_flip;
 
-       intel_prepare_page_flip(dev, plane);
-
        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the Pendingflip
         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3918,6 +3931,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
        if (I915_READ(ISR) & flip_pending)
                goto check_page_flip;
 
+       intel_prepare_page_flip(dev, plane);
        intel_finish_page_flip(dev, pipe);
        return true;
 
index fb3e3d429191247c5041af8ca0212c6ce1e2f705..e7a16f119a294d0d20d0afa832d6d0b2a803edd7 100644 (file)
@@ -9815,7 +9815,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
                        /* vlv: DISPLAY_FLIP fails to change tiling */
                        ring = NULL;
-       } else if (IS_IVYBRIDGE(dev)) {
+       } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                ring = &dev_priv->ring[BCS];
        } else if (INTEL_INFO(dev)->gen >= 7) {
                ring = obj->ring;
@@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev)
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
        udelay(300);
 
-       /*
-        * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
-        * from S3 without preserving (some of?) the other bits.
-        */
-       I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
+       I915_WRITE(vga_reg, VGA_DISP_DISABLE);
        POSTING_READ(vga_reg);
 }
 
@@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev)
 
        intel_shared_dpll_init(dev);
 
-       /* save the BIOS value before clobbering it */
-       dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
        /* Just disable it once at startup */
        i915_disable_vga(dev);
        intel_setup_outputs(dev);
index 25fdbb16d4e0defa47f660d2f2a940582b83608f..3b40a17b8852fa7d3ff0519a37baef85c09f467b 100644 (file)
@@ -794,6 +794,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_reset_rps_interrupts(struct drm_device *dev);
 void gen6_enable_rps_interrupts(struct drm_device *dev);
 void gen6_disable_rps_interrupts(struct drm_device *dev);
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
 static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
index 4d63839bd9b4c53be99842c38188331f8e7817c7..dfb783a8f2c36e05bc08abfe21a1272c2903a69f 100644 (file)
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 
        WARN_ON(panel->backlight.max == 0);
 
-       if (panel->backlight.level == 0) {
+       if (panel->backlight.level <= panel->backlight.min) {
                panel->backlight.level = panel->backlight.max;
                if (panel->backlight.device)
                        panel->backlight.device->props.brightness =
index 964b28e3c6303e5db6ecd5458c42b90b27cdd11f..bf814a64582a3eee53964ce4a8f4a62913b860a7 100644 (file)
@@ -4363,16 +4363,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
        mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
        mask &= dev_priv->pm_rps_events;
 
-       /* IVB and SNB hard hangs on looping batchbuffer
-        * if GEN6_PM_UP_EI_EXPIRED is masked.
-        */
-       if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
-               mask |= GEN6_PM_RP_UP_EI_EXPIRED;
-
-       if (IS_GEN8(dev_priv->dev))
-               mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
-       return ~mask;
+       return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
 }
 
 /* gen6_set_rps is called to update the frequency request, but should also be
@@ -4441,7 +4432,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
                return;
 
        /* Mask turbo interrupt so that they will not come in between */
-       I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+       I915_WRITE(GEN6_PMINTRMSK,
+                  gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 
        vlv_force_gfx_clock(dev_priv, true);
 
index f5a78d53e2978ed1c4f25cdaa73d9fcb0ca887bb..ac6da7102fbbdc53c74e584234c51506191da1ee 100644 (file)
@@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                vlv_power_sequencer_reset(dev_priv);
 }
 
-static void check_power_well_state(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
-       if (power_well->always_on || !i915.disable_power_well) {
-               if (!enabled)
-                       goto mismatch;
-
-               return;
-       }
-
-       if (enabled != (power_well->count > 0))
-               goto mismatch;
-
-       return;
-
-mismatch:
-       WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
-                 power_well->name, power_well->always_on, enabled,
-                 power_well->count, i915.disable_power_well);
-}
-
 /**
  * intel_display_power_get - grab a power domain reference
  * @dev_priv: i915 device instance
@@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
                        power_well->ops->enable(dev_priv, power_well);
                        power_well->hw_enabled = true;
                }
-
-               check_power_well_state(dev_priv, power_well);
        }
 
        power_domains->domain_use_count[domain]++;
@@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
                        power_well->hw_enabled = false;
                        power_well->ops->disable(dev_priv, power_well);
                }
-
-               check_power_well_state(dev_priv, power_well);
        }
 
        mutex_unlock(&power_domains->lock);
index ff2b434b3db480a47aa5b94515476714b2819693..760947e380c93bf429a0459622d62e376d450b13 100644 (file)
@@ -26,7 +26,7 @@
 void
 nvkm_event_put(struct nvkm_event *event, u32 types, int index)
 {
-       BUG_ON(!spin_is_locked(&event->refs_lock));
+       assert_spin_locked(&event->refs_lock);
        while (types) {
                int type = __ffs(types); types &= ~(1 << type);
                if (--event->refs[index * event->types_nr + type] == 0) {
@@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index)
 void
 nvkm_event_get(struct nvkm_event *event, u32 types, int index)
 {
-       BUG_ON(!spin_is_locked(&event->refs_lock));
+       assert_spin_locked(&event->refs_lock);
        while (types) {
                int type = __ffs(types); types &= ~(1 << type);
                if (++event->refs[index * event->types_nr + type] == 1) {
index d1bcde55e9d734df7573366a21f87e8b40a5f18b..839a32577680bf32eecbed59e196e6d6ea3606da 100644 (file)
@@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
        struct nvkm_event *event = notify->event;
        unsigned long flags;
 
-       BUG_ON(!spin_is_locked(&event->list_lock));
+       assert_spin_locked(&event->list_lock);
        BUG_ON(size != notify->size);
 
        spin_lock_irqsave(&event->refs_lock, flags);
index 674da1f095b29a1c1ecc524fef40eb3b60bc3a35..7329226906539fb0ed7f5f3ce6ab9280cd13a20c 100644 (file)
@@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
                break;
+       case 0x106:
+               device->cname = "GK208B";
+               device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+               device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+               device->oclass[NVDEV_SUBDEV_I2C    ] =  nve0_i2c_oclass;
+               device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
+               device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
+               device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
+               device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
+               device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+               device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+               device->oclass[NVDEV_SUBDEV_PWR    ] =  nv108_pwr_oclass;
+               device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nvd0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_FIFO   ] =  nv108_fifo_oclass;
+               device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
+               device->oclass[NVDEV_ENGINE_GR     ] =  nv108_graph_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] =  nvf0_disp_oclass;
+               device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+               device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+               break;
        case 0x108:
                device->cname = "GK208";
                device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
index 5e58bba0dd5c5bbcf3527088abdc4a80eaeaec9c..a7a890fad1e537325eb24d477d75c24feb624f46 100644 (file)
@@ -44,8 +44,10 @@ static void
 pramin_fini(void *data)
 {
        struct priv *priv = data;
-       nv_wr32(priv->bios, 0x001700, priv->bar0);
-       kfree(priv);
+       if (priv) {
+               nv_wr32(priv->bios, 0x001700, priv->bar0);
+               kfree(priv);
+       }
 }
 
 static void *
index 00f2ca7e44a56af6bb2bf4db5ce159b6526f2d00..033a8e99949735866c751494fd9deae5765dc49e 100644 (file)
 
 #include "nv50.h"
 
+struct nvaa_ram_priv {
+       struct nouveau_ram base;
+       u64 poller_base;
+};
+
 static int
 nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
              struct nouveau_oclass *oclass, void *data, u32 datasize,
              struct nouveau_object **pobject)
 {
-       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+       u32 rsvd_head = ( 256 * 1024); /* vga memory */
+       u32 rsvd_tail = (1024 * 1024); /* vbios etc */
        struct nouveau_fb *pfb = nouveau_fb(parent);
-       struct nouveau_ram *ram;
+       struct nvaa_ram_priv *priv;
        int ret;
 
-       ret = nouveau_ram_create(parent, engine, oclass, &ram);
-       *pobject = nv_object(ram);
+       ret = nouveau_ram_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       ram->size = nv_rd32(pfb, 0x10020c);
-       ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+       priv->base.type   = NV_MEM_TYPE_STOLEN;
+       priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+       priv->base.size   = (u64)nv_rd32(pfb, 0x100e14) << 12;
 
-       ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
-                             (rsvd_head + rsvd_tail), 1);
+       rsvd_tail += 0x1000;
+       priv->poller_base = priv->base.size - rsvd_tail;
+
+       ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
+                             (priv->base.size  - (rsvd_head + rsvd_tail)) >> 12,
+                             1);
        if (ret)
                return ret;
 
-       ram->type   = NV_MEM_TYPE_STOLEN;
-       ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
-       ram->get = nv50_ram_get;
-       ram->put = nv50_ram_put;
+       priv->base.get = nv50_ram_get;
+       priv->base.put = nv50_ram_put;
+       return 0;
+}
+
+static int
+nvaa_ram_init(struct nouveau_object *object)
+{
+       struct nouveau_fb *pfb = nouveau_fb(object);
+       struct nvaa_ram_priv *priv = (void *)object;
+       int ret;
+       u64 dniso, hostnb, flush;
+
+       ret = nouveau_ram_init(&priv->base);
+       if (ret)
+               return ret;
+
+       dniso  = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
+       hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
+       flush  = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+
+       /* Enable NISO poller for various clients and set their associated
+        * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+        */
+       nv_wr32(pfb, 0x100c18, dniso);
+       nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
+       nv_wr32(pfb, 0x100c1c, hostnb);
+       nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
+       nv_wr32(pfb, 0x100c24, flush);
+       nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
+
        return 0;
 }
 
@@ -60,7 +97,7 @@ nvaa_ram_oclass = {
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvaa_ram_ctor,
                .dtor = _nouveau_ram_dtor,
-               .init = _nouveau_ram_init,
+               .init = nvaa_ram_init,
                .fini = _nouveau_ram_fini,
        },
 };
index a75c35ccf25c739010ac3f866f7106824e387ff6..165401c4045cfe56ee42eeba1eda1918c4ded5a3 100644 (file)
 
 #include "nv04.h"
 
-static void
-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
-{
-       struct nv04_mc_priv *priv = (void *)pmc;
-       nv_wr08(priv, 0x088050, 0xff);
-}
-
 struct nouveau_oclass *
 nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
        .base.handle = NV_SUBDEV(MC, 0x4c),
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
                .fini = _nouveau_mc_fini,
        },
        .intr = nv04_mc_intr,
-       .msi_rearm = nv4c_mc_msi_rearm,
 }.base;
index 21ec561edc999458c5a8d4f1a99be19e67070ca0..bba2960d3dfbb5de9b6e69d977f72529624ab65c 100644 (file)
@@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
         * so use the DMA API for them.
         */
        if (!nv_device_is_cpu_coherent(device) &&
-           ttm->caching_state == tt_uncached)
+           ttm->caching_state == tt_uncached) {
                ttm_dma_unpopulate(ttm_dma, dev->dev);
+               return;
+       }
 
 #if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
index 42c34babc2e5b728959ad5c8135266f8b3c147e2..bf0f9e21d714a80248749ed0594699054e1b7fea 100644 (file)
@@ -36,7 +36,14 @@ void
 nouveau_gem_object_del(struct drm_gem_object *gem)
 {
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
+       struct device *dev = drm->dev->dev;
+       int ret;
+
+       ret = pm_runtime_get_sync(dev);
+       if (WARN_ON(ret < 0 && ret != -EACCES))
+               return;
 
        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
        /* reset filp so nouveau_bo_del_ttm() can test for it */
        gem->filp = NULL;
        ttm_bo_unref(&bo);
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
 }
 
 int
@@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nouveau_vma *vma;
+       struct device *dev = drm->dev->dev;
        int ret;
 
        if (!cli->vm)
@@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
                        goto out;
                }
 
+               ret = pm_runtime_get_sync(dev);
+               if (ret < 0 && ret != -EACCES)
+                       goto out;
+
                ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
-               if (ret) {
+               if (ret)
                        kfree(vma);
-                       goto out;
-               }
+
+               pm_runtime_mark_last_busy(dev);
+               pm_runtime_put_autosuspend(dev);
        } else {
                vma->refcount++;
        }
@@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+       struct device *dev = drm->dev->dev;
        struct nouveau_vma *vma;
        int ret;
 
@@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 
        vma = nouveau_bo_vma_find(nvbo, cli->vm);
        if (vma) {
-               if (--vma->refcount == 0)
-                       nouveau_gem_object_unmap(nvbo, vma);
+               if (--vma->refcount == 0) {
+                       ret = pm_runtime_get_sync(dev);
+                       if (!WARN_ON(ret < 0 && ret != -EACCES)) {
+                               nouveau_gem_object_unmap(nvbo, vma);
+                               pm_runtime_mark_last_busy(dev);
+                               pm_runtime_put_autosuspend(dev);
+                       }
+               }
        }
        ttm_bo_unreserve(&nvbo->bo);
 }
index d59ec491dbb9cba64d76369e62ef61b836c094d8..ed644a4f6f57c4254349c3881a16955cd42cbc05 100644 (file)
@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                                return pll;
                }
                /* otherwise, pick one of the plls */
-               if ((rdev->family == CHIP_KAVERI) ||
-                   (rdev->family == CHIP_KABINI) ||
+               if ((rdev->family == CHIP_KABINI) ||
                    (rdev->family == CHIP_MULLINS)) {
-                       /* KB/KV/ML has PPLL1 and PPLL2 */
+                       /* KB/ML has PPLL1 and PPLL2 */
                        pll_in_use = radeon_get_pll_use_mask(crtc);
                        if (!(pll_in_use & (1 << ATOM_PPLL2)))
                                return ATOM_PPLL2;
@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                        DRM_ERROR("unable to allocate a PPLL\n");
                        return ATOM_PPLL_INVALID;
                } else {
-                       /* CI has PPLL0, PPLL1, and PPLL2 */
+                       /* CI/KV has PPLL0, PPLL1, and PPLL2 */
                        pll_in_use = radeon_get_pll_use_mask(crtc);
                        if (!(pll_in_use & (1 << ATOM_PPLL2)))
                                return ATOM_PPLL2;
@@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
        case ATOM_PPLL0:
                /* disable the ppll */
                if ((rdev->family == CHIP_ARUBA) ||
+                   (rdev->family == CHIP_KAVERI) ||
                    (rdev->family == CHIP_BONAIRE) ||
                    (rdev->family == CHIP_HAWAII))
                        atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
index 11ba9d21b89b608788f623822bcfb6f9f14dbbf1..db42a670f9957c7fd6c3698b2c0be178ac6a02d2 100644 (file)
@@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
        struct radeon_connector_atom_dig *dig_connector;
        int dp_clock;
 
+       if ((mode->clock > 340000) &&
+           (!radeon_connector_is_dp12_capable(connector)))
+               return MODE_CLOCK_HIGH;
+
        if (!radeon_connector->con_priv)
                return MODE_CLOCK_HIGH;
        dig_connector = radeon_connector->con_priv;
index 6dcde3798b45a026f0be30f8e7bffb8b254ace81..64fdae558d36e908cca5af2388b22996db975502 100644 (file)
@@ -6033,6 +6033,17 @@ void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 1 << vm_id);
 
+       /* wait for the invalidate to complete */
+       radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+                                WAIT_REG_MEM_FUNCTION(0) |  /* always */
+                                WAIT_REG_MEM_ENGINE(0))); /* me */
+       radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0); /* ref */
+       radeon_ring_write(ring, 0); /* mask */
+       radeon_ring_write(ring, 0x20); /* poll interval */
+
        /* compute doesn't have PFP */
        if (usepfp) {
                /* sync PFP to ME, otherwise we might get invalid PFP reads */
index dde5c7e29eb200b6dc78f1fad46197e43e0013ed..42cd0cffe210934b0dadac9eaff72add715c13f2 100644 (file)
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
@@ -903,6 +902,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
 void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
                      unsigned vm_id, uint64_t pd_addr)
 {
+       u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+                         SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        if (vm_id < 8) {
                radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
@@ -943,5 +945,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 1 << vm_id);
+
+       radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+       radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0); /* reference */
+       radeon_ring_write(ring, 0); /* mask */
+       radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
index ba85986febea5054762615ee7fc5edbaa275f2cd..03003f8a6de63ba00c741824c053070a009cd319 100644 (file)
 #define ATC_VM_APERTURE1_HIGH_ADDR                             0x330Cu
 #define ATC_VM_APERTURE1_LOW_ADDR                              0x3304u
 
+#define IH_VMID_0_LUT                                          0x3D40u
+
 #endif
index 2fe8cfc966d9304b6845f6f6d29c9e236d80b0cb..bafdf92a5732dfa679f74e47ee582a699f9d635e 100644 (file)
@@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
        }
 
        sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-       if (sad_count < 0) {
+       if (sad_count <= 0) {
                DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
                return;
        }
index 9b42001295ba587197f5bcb317daca4a0f83cb3d..e3e9c10cfba97438571b4b8bc88cbea7c3dab9f7 100644 (file)
@@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
        if (radeon_bapm == -1) {
-               /* There are stability issues reported on with
-                * bapm enabled on an asrock system.
-                */
-               if (rdev->pdev->subsystem_vendor == 0x1849)
-                       pi->bapm_enable = false;
-               else
+               /* only enable bapm on KB, ML by default */
+               if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                        pi->bapm_enable = true;
+               else
+                       pi->bapm_enable = false;
        } else if (radeon_bapm == 0) {
                pi->bapm_enable = false;
        } else {
index 360de9f1f4914079d3de3ad0022a50862b934395..aea48c89b24170e692bc91bf2b4c927ef9d08784 100644 (file)
@@ -2516,6 +2516,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
        radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
        radeon_ring_write(ring, 1 << vm_id);
 
+       /* wait for the invalidate to complete */
+       radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+                                WAIT_REG_MEM_ENGINE(0))); /* me */
+       radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0); /* ref */
+       radeon_ring_write(ring, 0); /* mask */
+       radeon_ring_write(ring, 0x20); /* poll interval */
+
        /* sync PFP to ME, otherwise we might get invalid PFP reads */
        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
        radeon_ring_write(ring, 0x0);
index 50f88611ff60c832dc59e767543f91f8baf859eb..ce787a9f12c01fd1f8179d54ef610c27b9be131b 100644 (file)
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
@@ -463,5 +462,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 1 << vm_id);
+
+       /* wait for invalidate to complete */
+       radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
+       radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
+       radeon_ring_write(ring, 0); /* mask */
+       radeon_ring_write(ring, 0); /* value */
 }
 
index 2e12e4d69253fde453c3fb648566c0224bfe06bc..ad7125486894d18ae90b0bc507d248d92baaf3e6 100644 (file)
 #define        PACKET3_MEM_SEMAPHORE                           0x39
 #define        PACKET3_MPEG_INDEX                              0x3A
 #define        PACKET3_WAIT_REG_MEM                            0x3C
+#define                WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+                * 1 - <
+                * 2 - <=
+                * 3 - ==
+                * 4 - !=
+                * 5 - >=
+                * 6 - >
+                */
+#define                WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+                * 1 - mem
+                */
+#define                WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+                * 1 - pfp
+                */
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_PFP_SYNC_ME                             0x42
 #define        PACKET3_SURFACE_SYNC                            0x43
                                         (1 << 21) |                    \
                                         (((n) & 0xFFFFF) << 0))
 
+#define DMA_SRBM_POLL_PACKET           ((9 << 28) |                    \
+                                        (1 << 27) |                    \
+                                        (1 << 26))
+
+#define DMA_SRBM_READ_PACKET           ((9 << 28) |                    \
+                                        (1 << 27))
+
 /* async DMA Packet types */
 #define        DMA_PACKET_WRITE                                  0x2
 #define        DMA_PACKET_COPY                                   0x3
index 74f06d5405913a7e78c18af9d365c6549b591f45..279801ca5110aff68d80ea452751d7f9b0bf748f 100644 (file)
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
                return r;
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+       rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
        rdev->asic->gart.set_page = &r100_pci_gart_set_page;
        return radeon_gart_table_ram_alloc(rdev);
 }
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
        WREG32(RADEON_AIC_HI_ADDR, 0);
 }
 
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+       return addr;
+}
+
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-                           uint64_t addr, uint32_t flags)
+                           uint64_t entry)
 {
        u32 *gtt = rdev->gart.ptr;
-       gtt[i] = cpu_to_le32(lower_32_bits(addr));
+       gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
index 064ad5569ccaac826612aedd8d035db3996106db..08d68f3e13e9887ff7b06f18e899c34fd85ae31a 100644 (file)
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)
 
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-                             uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-       void __iomem *ptr = rdev->gart.ptr;
-
        addr = (lower_32_bits(addr) >> 8) |
                ((upper_32_bits(addr) & 0xff) << 24);
        if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
                addr |= R300_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                addr |= R300_PTE_UNSNOOPED;
+       return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+                             uint64_t entry)
+{
+       void __iomem *ptr = rdev->gart.ptr;
+
        /* on x86 we want this to be CPU endian, on powerpc
         * on powerpc without HW swappers, it'll get swapped on way
         * into VRAM - so no need for cpu_to_le32 on VRAM tables */
-       writel(addr, ((void __iomem *)ptr) + (i * 4));
+       writel(entry, ((void __iomem *)ptr) + (i * 4));
 }
 
 int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
                DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+       rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
        rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
        return radeon_gart_table_vram_alloc(rdev);
 }
index 54529b837afaa1d76147f764664047af5f09b5f6..3f2a8d3febcab277c2c4ccbb0dd0ecf4a10a26ed 100644 (file)
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
  * Dummy page
  */
 struct radeon_dummy_page {
+       uint64_t        entry;
        struct page     *page;
        dma_addr_t      addr;
 };
@@ -645,7 +646,7 @@ struct radeon_gart {
        unsigned                        num_cpu_pages;
        unsigned                        table_size;
        struct page                     **pages;
-       dma_addr_t                      *pages_addr;
+       uint64_t                        *pages_entry;
        bool                            ready;
 };
 
@@ -1847,8 +1848,9 @@ struct radeon_asic {
        /* gart */
        struct {
                void (*tlb_flush)(struct radeon_device *rdev);
+               uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
                void (*set_page)(struct radeon_device *rdev, unsigned i,
-                                uint64_t addr, uint32_t flags);
+                                uint64_t entry);
        } gart;
        struct {
                int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
index 850de57069bec0effcadd4a55c6b1abead242314..ed0e10eee2dcd7459d016c4e3e9f1fb6a2254f1c 100644 (file)
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
                DRM_INFO("Forcing AGP to PCIE mode\n");
                rdev->flags |= RADEON_IS_PCIE;
                rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+               rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
                rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
        } else {
                DRM_INFO("Forcing AGP to PCI mode\n");
                rdev->flags |= RADEON_IS_PCI;
                rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+               rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
                rdev->asic->gart.set_page = &r100_pci_gart_set_page;
        }
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
        .mc_wait_for_idle = &r100_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
        .mc_wait_for_idle = &r100_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -333,6 +337,20 @@ static struct radeon_asic_ring r300_gfx_ring = {
        .set_wptr = &r100_gfx_set_wptr,
 };
 
+static struct radeon_asic_ring rv515_gfx_ring = {
+       .ib_execute = &r100_ring_ib_execute,
+       .emit_fence = &r300_fence_ring_emit,
+       .emit_semaphore = &r100_semaphore_ring_emit,
+       .cs_parse = &r300_cs_parse,
+       .ring_start = &rv515_ring_start,
+       .ring_test = &r100_ring_test,
+       .ib_test = &r100_ib_test,
+       .is_lockup = &r100_gpu_is_lockup,
+       .get_rptr = &r100_gfx_get_rptr,
+       .get_wptr = &r100_gfx_get_wptr,
+       .set_wptr = &r100_gfx_set_wptr,
+};
+
 static struct radeon_asic r300_asic = {
        .init = &r300_init,
        .fini = &r300_fini,
@@ -345,6 +363,7 @@ static struct radeon_asic r300_asic = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -411,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -477,6 +497,7 @@ static struct radeon_asic r420_asic = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -543,6 +564,7 @@ static struct radeon_asic rs400_asic = {
        .mc_wait_for_idle = &rs400_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
+               .get_page_entry = &rs400_gart_get_page_entry,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
@@ -609,6 +631,7 @@ static struct radeon_asic rs600_asic = {
        .mc_wait_for_idle = &rs600_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs600_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -677,6 +700,7 @@ static struct radeon_asic rs690_asic = {
        .mc_wait_for_idle = &rs690_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
+               .get_page_entry = &rs400_gart_get_page_entry,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
@@ -745,10 +769,11 @@ static struct radeon_asic rv515_asic = {
        .mc_wait_for_idle = &rv515_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
-               [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+               [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
        },
        .irq = {
                .set = &rs600_irq_set,
@@ -811,10 +836,11 @@ static struct radeon_asic r520_asic = {
        .mc_wait_for_idle = &r520_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
-               [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+               [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
        },
        .irq = {
                .set = &rs600_irq_set,
@@ -905,6 +931,7 @@ static struct radeon_asic r600_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -990,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1081,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1185,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1303,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1395,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1486,6 +1518,7 @@ static struct radeon_asic btc_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1621,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cayman_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -1724,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cayman_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -1857,6 +1892,7 @@ static struct radeon_asic si_asic = {
        .get_gpu_clock_counter = &si_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &si_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -2018,6 +2054,7 @@ static struct radeon_asic ci_asic = {
        .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cik_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -2125,6 +2162,7 @@ static struct radeon_asic kv_asic = {
        .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cik_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
index 2a45d548d5ece5d9cd1cdad32d64fb3808b0c6fd..8d787d115653d20ed2a460fa770416e73eeddbee 100644 (file)
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-                           uint64_t addr, uint32_t flags);
+                           uint64_t entry);
 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
                                struct radeon_fence *fence);
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-                                    uint64_t addr, uint32_t flags);
+                                    uint64_t entry);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
 extern int rs400_suspend(struct radeon_device *rdev);
 extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags);
+                        uint64_t entry);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
 void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags);
+                        uint64_t entry);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
index 9e7f23dd14bd5992d73b72ddec32d084ee906aed..87d5fb21cb61cc8709e4f685fa6c3a24ecbb9e9e 100644 (file)
@@ -34,7 +34,8 @@
 
 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
                                    uint64_t saddr, uint64_t daddr,
-                                   int flag, int n)
+                                   int flag, int n,
+                                   struct reservation_object *resv)
 {
        unsigned long start_jiffies;
        unsigned long end_jiffies;
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
                case RADEON_BENCHMARK_COPY_DMA:
                        fence = radeon_copy_dma(rdev, saddr, daddr,
                                                size / RADEON_GPU_PAGE_SIZE,
-                                               NULL);
+                                               resv);
                        break;
                case RADEON_BENCHMARK_COPY_BLIT:
                        fence = radeon_copy_blit(rdev, saddr, daddr,
                                                 size / RADEON_GPU_PAGE_SIZE,
-                                                NULL);
+                                                resv);
                        break;
                default:
                        DRM_ERROR("Unknown copy method\n");
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
        if (rdev->asic->copy.dma) {
                time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-                                               RADEON_BENCHMARK_COPY_DMA, n);
+                                               RADEON_BENCHMARK_COPY_DMA, n,
+                                               dobj->tbo.resv);
                if (time < 0)
                        goto out_cleanup;
                if (time > 0)
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
        if (rdev->asic->copy.blit) {
                time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-                                               RADEON_BENCHMARK_COPY_BLIT, n);
+                                               RADEON_BENCHMARK_COPY_BLIT, n,
+                                               dobj->tbo.resv);
                if (time < 0)
                        goto out_cleanup;
                if (time > 0)
index 0ec65168f331c73bcbfff24ee719cd6f98790602..bd7519fdd3f431cbce8c2bc6bd3e588e525be5cd 100644 (file)
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
                rdev->dummy_page.page = NULL;
                return -ENOMEM;
        }
+       rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+                                                           RADEON_GART_PAGE_DUMMY);
        return 0;
 }
 
index 102116902a070f728c434a8ee67215ede0cffb70..913fafa597ad210180c03e03618002a702cda441 100644 (file)
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
            pll->flags & RADEON_PLL_USE_REF_DIV)
                ref_div_max = pll->reference_div;
+       else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+               /* fix for problems on RS880 */
+               ref_div_max = min(pll->max_ref_div, 7u);
        else
                ref_div_max = pll->max_ref_div;
 
index 84146d5901aa5aacad168255c14d72cafce5cf15..5450fa95a47efdcde9aa664c740cbe578e4f5b26 100644 (file)
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
                radeon_bo_unpin(rdev->gart.robj);
        radeon_bo_unreserve(rdev->gart.robj);
        rdev->gart.table_addr = gpu_addr;
+
+       if (!r) {
+               int i;
+
+               /* We might have dropped some GART table updates while it wasn't
+                * mapped, restore all entries
+                */
+               for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+                       radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+               mb();
+               radeon_gart_tlb_flush(rdev);
+       }
+
        return r;
 }
 
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        unsigned t;
        unsigned p;
        int i, j;
-       u64 page_base;
 
        if (!rdev->gart.ready) {
                WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        for (i = 0; i < pages; i++, p++) {
                if (rdev->gart.pages[p]) {
                        rdev->gart.pages[p] = NULL;
-                       rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
-                       page_base = rdev->gart.pages_addr[p];
                        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                               rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
                                if (rdev->gart.ptr) {
-                                       radeon_gart_set_page(rdev, t, page_base,
-                                                            RADEON_GART_PAGE_DUMMY);
+                                       radeon_gart_set_page(rdev, t,
+                                                            rdev->dummy_page.entry);
                                }
-                               page_base += RADEON_GPU_PAGE_SIZE;
                        }
                }
        }
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 {
        unsigned t;
        unsigned p;
-       uint64_t page_base;
+       uint64_t page_base, page_entry;
        int i, j;
 
        if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
        for (i = 0; i < pages; i++, p++) {
-               rdev->gart.pages_addr[p] = dma_addr[i];
                rdev->gart.pages[p] = pagelist[i];
-               if (rdev->gart.ptr) {
-                       page_base = rdev->gart.pages_addr[p];
-                       for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-                               radeon_gart_set_page(rdev, t, page_base, flags);
-                               page_base += RADEON_GPU_PAGE_SIZE;
+               page_base = dma_addr[i];
+               for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                       page_entry = radeon_gart_get_page_entry(page_base, flags);
+                       rdev->gart.pages_entry[t] = page_entry;
+                       if (rdev->gart.ptr) {
+                               radeon_gart_set_page(rdev, t, page_entry);
                        }
+                       page_base += RADEON_GPU_PAGE_SIZE;
                }
        }
        mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
-       rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
-                                       rdev->gart.num_cpu_pages);
-       if (rdev->gart.pages_addr == NULL) {
+       rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+                                        rdev->gart.num_gpu_pages);
+       if (rdev->gart.pages_entry == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
        /* set GART entry to point to the dummy page by default */
-       for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
-               rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
-       }
+       for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+               rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
        return 0;
 }
 
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
  */
 void radeon_gart_fini(struct radeon_device *rdev)
 {
-       if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+       if (rdev->gart.ready) {
                /* unbind pages */
                radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
        }
        rdev->gart.ready = false;
        vfree(rdev->gart.pages);
-       vfree(rdev->gart.pages_addr);
+       vfree(rdev->gart.pages_entry);
        rdev->gart.pages = NULL;
-       rdev->gart.pages_addr = NULL;
+       rdev->gart.pages_entry = NULL;
 
        radeon_dummy_page_fini(rdev);
 }
index a46f73737994aba3f603aea4dde15ff916a91109..ac3c1310b953182acb0db6db41add071fd88e737 100644 (file)
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
        struct radeon_bo_va *bo_va;
        int r;
 
-       if (rdev->family < CHIP_CAYMAN) {
+       if ((rdev->family < CHIP_CAYMAN) ||
+           (!rdev->accel_working)) {
                return 0;
        }
 
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
        struct radeon_bo_va *bo_va;
        int r;
 
-       if (rdev->family < CHIP_CAYMAN) {
+       if ((rdev->family < CHIP_CAYMAN) ||
+           (!rdev->accel_working)) {
                return;
        }
 
@@ -576,7 +578,7 @@ error_unreserve:
 error_free:
        drm_free_large(vm_bos);
 
-       if (r)
+       if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 
index 242fd8b1b221d9c49459b67d775aaf3c57fea45f..bef9a09532844b026fc1197c8692411b0840d8fa 100644 (file)
@@ -72,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
 static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr);
 
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id);
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
@@ -92,7 +92,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .init_memory = kgd_init_memory,
        .init_pipeline = kgd_init_pipeline,
        .hqd_load = kgd_hqd_load,
-       .hqd_is_occupies = kgd_hqd_is_occupies,
+       .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .get_fw_version = get_fw_version
 };
@@ -101,6 +101,7 @@ static const struct kgd2kfd_calls *kgd2kfd;
 
 bool radeon_kfd_init(void)
 {
+#if defined(CONFIG_HSA_AMD_MODULE)
        bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
                                const struct kgd2kfd_calls**);
 
@@ -117,6 +118,17 @@ bool radeon_kfd_init(void)
        }
 
        return true;
+#elif defined(CONFIG_HSA_AMD)
+       if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+               kgd2kfd = NULL;
+
+               return false;
+       }
+
+       return true;
+#else
+       return false;
+#endif
 }
 
 void radeon_kfd_fini(void)
@@ -378,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                cpu_relax();
        write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
 
+       /* Mapping vmid to pasid also for IH block */
+       write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
+                       pasid_mapping);
+
        return 0;
 }
 
@@ -420,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
 static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t hpd_size, uint64_t hpd_gpu_addr)
 {
-       uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+       uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
        uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
 
        lock_srbm(kgd, mec, pipe, 0, 0);
@@ -517,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
        return 0;
 }
 
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id)
 {
        uint32_t act;
@@ -556,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                if (timeout == 0) {
                        pr_err("kfd: cp queue preemption time out (%dms)\n",
                                temp);
+                       release_queue(kgd);
                        return -ETIME;
                }
                msleep(20);
index 3cf9c1fa64756fb6b4430d5e351f21aee874ad1d..686411e4e4f6a3620289be34106ae5d38c9f6b93 100644 (file)
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return -ENOMEM;
                }
 
-               vm = &fpriv->vm;
-               r = radeon_vm_init(rdev, vm);
-               if (r) {
-                       kfree(fpriv);
-                       return r;
-               }
-
                if (rdev->accel_working) {
+                       vm = &fpriv->vm;
+                       r = radeon_vm_init(rdev, vm);
+                       if (r) {
+                               kfree(fpriv);
+                               return r;
+                       }
+
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
                                radeon_vm_fini(rdev, vm);
@@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                                        radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
                                radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        }
+                       radeon_vm_fini(rdev, vm);
                }
 
-               radeon_vm_fini(rdev, vm);
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
index 32522cc940a127c4b413c9928121c2b9258457fd..f7da8fe96a66b51e40aedb6ee0255770821cb989 100644 (file)
@@ -1287,8 +1287,39 @@ dpm_failed:
        return ret;
 }
 
+struct radeon_dpm_quirk {
+       u32 chip_vendor;
+       u32 chip_device;
+       u32 subsys_vendor;
+       u32 subsys_device;
+};
+
+/* cards with dpm stability problems */
+static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
+       /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
+       { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
+       /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
+       { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
+       { 0, 0, 0, 0 },
+};
+
 int radeon_pm_init(struct radeon_device *rdev)
 {
+       struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
+       bool disable_dpm = false;
+
+       /* Apply dpm quirks */
+       while (p && p->chip_device != 0) {
+               if (rdev->pdev->vendor == p->chip_vendor &&
+                   rdev->pdev->device == p->chip_device &&
+                   rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+                   rdev->pdev->subsystem_device == p->subsys_device) {
+                       disable_dpm = true;
+                       break;
+               }
+               ++p;
+       }
+
        /* enable dpm on rv6xx+ */
        switch (rdev->family) {
        case CHIP_RV610:
@@ -1344,6 +1375,8 @@ int radeon_pm_init(struct radeon_device *rdev)
                         (!(rdev->flags & RADEON_IS_IGP)) &&
                         (!rdev->smc_fw))
                        rdev->pm.pm_method = PM_METHOD_PROFILE;
+               else if (disable_dpm && (radeon_dpm == -1))
+                       rdev->pm.pm_method = PM_METHOD_PROFILE;
                else if (radeon_dpm == 0)
                        rdev->pm.pm_method = PM_METHOD_PROFILE;
                else
index 535403e0c8a28c20011261decb68eeb3d47b6846..15aee723db77ec171a5b8a32797d75e3d49e1469 100644 (file)
@@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
        u32 format;
        u32 *buffer;
        const u8 __user *data;
-       int size, dwords, tex_width, blit_width, spitch;
+       unsigned int size, dwords, tex_width, blit_width, spitch;
        u32 height;
        int i;
        u32 texpitch, microtile;
index 07b506b410080f482b4d41948c3bd627ae4bb39e..791818165c761f7fdc1807b8f0b7e22de0aed24f 100644 (file)
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
-                                               NULL);
+                                               vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
-                                                NULL);
+                                                vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        r = PTR_ERR(fence);
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
-                                               NULL);
+                                               vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
-                                                NULL);
+                                                vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        r = PTR_ERR(fence);
index cde48c42b30ad4b63c27dde6741f8637840670fc..2a5a4a9e772d6668ee844b94c61219a0c3100340 100644 (file)
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
        uint64_t result;
 
        /* page table offset */
-       result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-       /* in case cpu page size != gpu page size*/
-       result |= addr & (~PAGE_MASK);
+       result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+       result &= ~RADEON_GPU_PAGE_MASK;
 
        return result;
 }
@@ -745,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
         */
 
        /* NI is optimized for 256KB fragments, SI and newer for 64KB */
-       uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+       uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
+                              (rdev->family == CHIP_ARUBA)) ?
                        R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
-       uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+       uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
+                              (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
 
        uint64_t frag_start = ALIGN(pe_start, frag_align);
        uint64_t frag_end = pe_end & ~(frag_align - 1);
index c5799f16aa4b2f27157b3946ec4c7b65ac85011f..34e3235f41d2bf63a5333e33f63e2518f3668942 100644 (file)
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
 #define RS400_PTE_WRITEABLE (1 << 2)
 #define RS400_PTE_READABLE  (1 << 3)
 
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
        uint32_t entry;
-       u32 *gtt = rdev->gart.ptr;
 
        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
                entry |= RS400_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                entry |= RS400_PTE_UNSNOOPED;
-       entry = cpu_to_le32(entry);
-       gtt[i] = entry;
+       return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+                        uint64_t entry)
+{
+       u32 *gtt = rdev->gart.ptr;
+       gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 int rs400_mc_wait_for_idle(struct radeon_device *rdev)
index 9acb1c3c005b6ead68e940ba5443b88d3de0be0b..74bce91aecc11856dd5601c1fb19326014029471 100644 (file)
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
        radeon_gart_table_vram_free(rdev);
 }
 
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-       void __iomem *ptr = (void *)rdev->gart.ptr;
-
        addr = addr & 0xFFFFFFFFFFFFF000ULL;
        addr |= R600_PTE_SYSTEM;
        if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
                addr |= R600_PTE_WRITEABLE;
        if (flags & RADEON_GART_PAGE_SNOOP)
                addr |= R600_PTE_SNOOPED;
-       writeq(addr, ptr + (i * 8));
+       return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+                        uint64_t entry)
+{
+       void __iomem *ptr = (void *)rdev->gart.ptr;
+       writeq(entry, ptr + (i * 8));
 }
 
 int rs600_irq_set(struct radeon_device *rdev)
index 60df444bd0756fbe42b5a5dab1072f47c5c73a3a..5d89b874a1a25851aa331af54f3ae6a0fd7c39cc 100644 (file)
@@ -5057,6 +5057,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 1 << vm_id);
 
+       /* wait for the invalidate to complete */
+       radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+                                WAIT_REG_MEM_ENGINE(0))); /* me */
+       radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0); /* ref */
+       radeon_ring_write(ring, 0); /* mask */
+       radeon_ring_write(ring, 0x20); /* poll interval */
+
        /* sync PFP to ME, otherwise we might get invalid PFP reads */
        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
        radeon_ring_write(ring, 0x0);
index f5cc777e1c5f142ff00b383e3863b70b47a96b3e..83207929fc627f9a813acada5740c9ad14bb6bf1 100644 (file)
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
@@ -206,6 +205,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 1 << vm_id);
+
+       /* wait for invalidate to complete */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+       radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
+       radeon_ring_write(ring, 0xff << 16); /* retry */
+       radeon_ring_write(ring, 1 << vm_id); /* mask */
+       radeon_ring_write(ring, 0); /* value */
+       radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
 
 /**
index 32e354b8b0aba6b96cfc8e94b1519b7fca4f47fb..eff8a6444956310a591afdf3d980f2f3288cf386 100644 (file)
@@ -2908,6 +2908,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
        return ret;
 }
 
+struct si_dpm_quirk {
+       u32 chip_vendor;
+       u32 chip_device;
+       u32 subsys_vendor;
+       u32 subsys_device;
+       u32 max_sclk;
+       u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+       /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+       { 0, 0, 0, 0 },
+};
+
 static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                                        struct radeon_ps *rps)
 {
@@ -2918,7 +2934,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
        u32 mclk, sclk;
        u16 vddc, vddci;
        u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+       u32 max_sclk = 0, max_mclk = 0;
        int i;
+       struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+       /* Apply dpm quirks */
+       while (p && p->chip_device != 0) {
+               if (rdev->pdev->vendor == p->chip_vendor &&
+                   rdev->pdev->device == p->chip_device &&
+                   rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+                   rdev->pdev->subsystem_device == p->subsys_device) {
+                       max_sclk = p->max_sclk;
+                       max_mclk = p->max_mclk;
+                       break;
+               }
+               ++p;
+       }
 
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
            ni_dpm_vblank_too_short(rdev))
@@ -2972,6 +3003,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                        if (ps->performance_levels[i].mclk > max_mclk_vddc)
                                ps->performance_levels[i].mclk = max_mclk_vddc;
                }
+               if (max_mclk) {
+                       if (ps->performance_levels[i].mclk > max_mclk)
+                               ps->performance_levels[i].mclk = max_mclk;
+               }
+               if (max_sclk) {
+                       if (ps->performance_levels[i].sclk > max_sclk)
+                               ps->performance_levels[i].sclk = max_sclk;
+               }
        }
 
        /* XXX validate the min clocks required for display */
index 4069be89e5852852ff4e884630571908dda6e387..84999242c74746317dfeed4954fee7baec4019e7 100644 (file)
 #define        PACKET3_MPEG_INDEX                              0x3A
 #define        PACKET3_COPY_DW                                 0x3B
 #define        PACKET3_WAIT_REG_MEM                            0x3C
+#define                WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+                * 1 - <
+                * 2 - <=
+                * 3 - ==
+                * 4 - !=
+                * 5 - >=
+                * 6 - >
+                */
+#define                WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+                * 1 - mem
+                */
+#define                WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+                * 1 - pfp
+                */
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_COPY_DATA                               0x40
 #define        PACKET3_CP_DMA                                  0x41
 #define        DMA_PACKET_TRAP                                   0x7
 #define        DMA_PACKET_SRBM_WRITE                             0x9
 #define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_POLL_REG_MEM                           0xe
 #define        DMA_PACKET_NOP                                    0xf
 
 #define VCE_STATUS                                     0x20004
index 7b5d22110f25e7619c37eac7fd66858fa25b93e8..6c6b655defcf4eac679913e70896810208dfd6ce 100644 (file)
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
 
        mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
-       else if (hide_svga) {
-               mutex_lock(&dev_priv->hw_mutex);
+       else if (hide_svga)
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
-               mutex_unlock(&dev_priv->hw_mutex);
-       }
 
        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
-       mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
        rwlock_init(&dev_priv->resource_lock);
        ttm_lock_init(&dev_priv->reservation_sem);
+       spin_lock_init(&dev_priv->hw_lock);
+       spin_lock_init(&dev_priv->waiter_lock);
+       spin_lock_init(&dev_priv->cap_lock);
 
        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev_priv->enable_fb = enable_fbdev;
 
-       mutex_lock(&dev_priv->hw_mutex);
-
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
-               mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }
 
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                dev_priv->prim_bb_mem = dev_priv->vram_size;
 
        ret = vmw_dma_masks(dev_priv);
-       if (unlikely(ret != 0)) {
-               mutex_unlock(&dev_priv->hw_mutex);
+       if (unlikely(ret != 0))
                goto out_err0;
-       }
 
        /*
         * Limit back buffer size to VRAM size.  Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv->prim_bb_mem > dev_priv->vram_size)
                dev_priv->prim_bb_mem = dev_priv->vram_size;
 
-       mutex_unlock(&dev_priv->hw_mutex);
-
        vmw_print_capabilities(dev_priv->capabilities);
 
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
 
        if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
        if (!dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
        return ret;
 }
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
 
        dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
 
-       mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        (void) vmw_read(dev_priv, SVGA_REG_ID);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        /**
         * Reclaim 3d reference held by fbdev and potentially
index 4ee799b43d5dfc40a8077e60e9f34b17a8dddcf9..d26a6daa9719a23542cb8c575691f1d63851dba4 100644 (file)
@@ -399,7 +399,8 @@ struct vmw_private {
        uint32_t memory_size;
        bool has_gmr;
        bool has_mob;
-       struct mutex hw_mutex;
+       spinlock_t hw_lock;
+       spinlock_t cap_lock;
 
        /*
         * VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
        atomic_t marker_seq;
        wait_queue_head_t fence_queue;
        wait_queue_head_t fifo_queue;
-       int fence_queue_waiters; /* Protected by hw_mutex */
-       int goal_queue_waiters; /* Protected by hw_mutex */
+       spinlock_t waiter_lock;
+       int fence_queue_waiters; /* Protected by waiter_lock */
+       int goal_queue_waiters; /* Protected by waiter_lock */
        atomic_t fifo_queue_waiters;
        uint32_t last_read_seqno;
        spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
        return (struct vmw_master *) master->driver_priv;
 }
 
+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read- and write operation. This is of course costly, but we
+ * don't perform much register access in the timing critical paths anyway.
+ * Instead we have the extra benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
 static inline void vmw_write(struct vmw_private *dev_priv,
                             unsigned int offset, uint32_t value)
 {
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+       spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
                                unsigned int offset)
 {
-       uint32_t val;
+       unsigned long irq_flags;
+       u32 val;
 
+       spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+       spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
        return val;
 }
 
index b7594cb758afc4299493122f4f6e44c68ef4d919..945f1e0dad9278145eed43708cb820dc54a566a3 100644 (file)
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
        struct vmw_private *dev_priv;
        spinlock_t lock;
        struct list_head fence_list;
-       struct work_struct work, ping_work;
+       struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
        return "svga";
 }
 
-static void vmw_fence_ping_func(struct work_struct *work)
-{
-       struct vmw_fence_manager *fman =
-               container_of(work, struct vmw_fence_manager, ping_work);
-
-       vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
 static bool vmw_fence_enable_signaling(struct fence *f)
 {
        struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
        if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                return false;
 
-       if (mutex_trylock(&dev_priv->hw_mutex)) {
-               vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
-               mutex_unlock(&dev_priv->hw_mutex);
-       } else
-               schedule_work(&fman->ping_work);
+       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 
        return true;
 }
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
-       INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
        bool lists_empty;
 
        (void) cancel_work_sync(&fman->work);
-       (void) cancel_work_sync(&fman->ping_work);
 
        spin_lock_irqsave(&fman->lock, irq_flags);
        lists_empty = list_empty(&fman->fence_list) &&
index 09e10aefcd8eb94e6a22182b59591cdb30cbcd36..39f2b03888e7e5b7beb107cd0a32aa0345a328be 100644 (file)
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
                if (!dev_priv->has_mob)
                        return false;
 
-               mutex_lock(&dev_priv->hw_mutex);
+               spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
                result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
-               mutex_unlock(&dev_priv->hw_mutex);
+               spin_unlock(&dev_priv->cap_lock);
 
                return (result != 0);
        }
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
        DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
 
-       mutex_lock(&dev_priv->hw_mutex);
        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
        dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        mb();
 
        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        return vmw_fifo_send_fence(dev_priv, &dummy);
 }
 
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       static DEFINE_SPINLOCK(ping_lock);
+       unsigned long irq_flags;
 
+       /*
+        * The ping_lock is needed because we don't have an atomic
+        * test-and-set of the SVGA_FIFO_BUSY register.
+        */
+       spin_lock_irqsave(&ping_lock, irq_flags);
        if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
                iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
        }
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
-       mutex_lock(&dev_priv->hw_mutex);
-
-       vmw_fifo_ping_host_locked(dev_priv, reason);
-
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock_irqrestore(&ping_lock, irq_flags);
 }
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
-       mutex_lock(&dev_priv->hw_mutex);
-
        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
                ;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        vmw_write(dev_priv, SVGA_REG_TRACES,
                  dev_priv->traces_state);
 
-       mutex_unlock(&dev_priv->hw_mutex);
        vmw_marker_queue_takedown(&fifo->marker_queue);
 
        if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
                return vmw_fifo_wait_noirq(dev_priv, bytes,
                                           interruptible, timeout);
 
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 
        if (interruptible)
                ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
        else if (likely(ret > 0))
                ret = 0;
 
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 
        return ret;
 }
index 37881ecf5d7a9f74c49d4e1c018abc1cc2a9dffd..69c8ce23123c96af22c44011ff2b8fcdab837584 100644 (file)
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
                (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
        compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
 
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->cap_lock);
        for (i = 0; i < max_size; ++i) {
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                compat_cap->pairs[i][0] = i;
                compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->cap_lock);
 
        return 0;
 }
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                if (num > SVGA3D_DEVCAP_MAX)
                        num = SVGA3D_DEVCAP_MAX;
 
-               mutex_lock(&dev_priv->hw_mutex);
+               spin_lock(&dev_priv->cap_lock);
                for (i = 0; i < num; ++i) {
                        vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                        *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                }
-               mutex_unlock(&dev_priv->hw_mutex);
+               spin_unlock(&dev_priv->cap_lock);
        } else if (gb_objects) {
                ret = vmw_fill_compat_cap(dev_priv, bounce, size);
                if (unlikely(ret != 0))
index 0c423766c44119ca923825e879e3d05b7058cc90..9fe9827ee499c177e50735d1acbf84d13eb606f0 100644 (file)
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
 
 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 {
-       uint32_t busy;
 
-       mutex_lock(&dev_priv->hw_mutex);
-       busy = vmw_read(dev_priv, SVGA_REG_BUSY);
-       mutex_unlock(&dev_priv->hw_mutex);
-
-       return (busy == 0);
+       return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
 }
 
 void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->fence_queue_waiters++ == 0) {
                unsigned long irq_flags;
 
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->fence_queue_waiters == 0) {
                unsigned long irq_flags;
 
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->goal_queue_waiters++ == 0) {
                unsigned long irq_flags;
 
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->goal_queue_waiters == 0) {
                unsigned long irq_flags;
 
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;
 
-       mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
index 3725b521d9319c9b952bf3920bfcd3a27c61dac3..8725b79e7847d68239a25413c482883e44024704 100644 (file)
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_display_unit *du = vmw_connector_to_du(connector);
 
-       mutex_lock(&dev_priv->hw_mutex);
        num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        return ((vmw_connector_to_du(connector)->unit < num_displays &&
                 du->pref_active) ?
index 230b6f887cd86e9b4d3d4bf625166c878d5524ed..dfdc26970022998adfb951f50baacb0e4df38474 100644 (file)
@@ -27,7 +27,8 @@ if HID
 
 config HID_BATTERY_STRENGTH
        bool "Battery level reporting for HID devices"
-       depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+       depends on HID
+       select POWER_SUPPLY
        default n
        ---help---
        This option adds support of reporting battery strength (for HID devices
index c3d0ac1a0988096eaacbe8063b354399b6a85e14..8b638792cb43c426c2e4fffb0bb594e76617f554 100644 (file)
@@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
index 7460f3402298c2e1925059a1ef5cbf669e9d381b..9243359c18219ab75c5e47bda83ed44f3d322f05 100644 (file)
 #define USB_DEVICE_ID_KYE_GPEN_560     0x5003
 #define USB_DEVICE_ID_KYE_EASYPEN_I405X        0x5010
 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X       0x5011
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2     0x501a
 #define USB_DEVICE_ID_KYE_EASYPEN_M610X        0x5013
 
 #define USB_VENDOR_ID_LABTEC           0x1020
index e0a0f06ac5ef6168c8fcdd5c2462df3f3130c941..9505605b6e22a72b29661d568aa6fe4dcaa32643 100644 (file)
@@ -311,6 +311,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                               USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+                              USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
index b92bf01a1ae8122f486ea333288558f082162f5d..158fcf577fae570d331a37c46d650a151141c19c 100644 (file)
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                }
                break;
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
                if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
                        rdesc = mousepen_i608x_rdesc_fixed;
                        *rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
        switch (id->product) {
        case USB_DEVICE_ID_KYE_EASYPEN_I405X:
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
        case USB_DEVICE_ID_KYE_EASYPEN_M610X:
                ret = kye_tablet_enable(hdev);
                if (ret) {
@@ -445,6 +447,8 @@ static const struct hid_device_id kye_devices[] = {
                                USB_DEVICE_ID_KYE_EASYPEN_I405X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+                               USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
index c917ab61aafa6156b433ed3148a31384ac6d4292..5bc6d80d5be79f465f3cbbb686c471db6162eb63 100644 (file)
@@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
 
        switch (data[0]) {
        case REPORT_ID_DJ_SHORT:
+               if (size != DJREPORT_SHORT_LENGTH) {
+                       dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+                       return false;
+               }
                return logi_dj_dj_event(hdev, report, data, size);
        case REPORT_ID_HIDPP_SHORT:
-               /* intentional fallthrough */
+               if (size != HIDPP_REPORT_SHORT_LENGTH) {
+                       dev_err(&hdev->dev,
+                               "Short HID++ report of bad size (%d)", size);
+                       return false;
+               }
+               return logi_dj_hidpp_event(hdev, report, data, size);
        case REPORT_ID_HIDPP_LONG:
+               if (size != HIDPP_REPORT_LONG_LENGTH) {
+                       dev_err(&hdev->dev,
+                               "Long HID++ report of bad size (%d)", size);
+                       return false;
+               }
                return logi_dj_hidpp_event(hdev, report, data, size);
        }
 
index 2f420c0b6609b1f197a15be182d07102323b3973..a93cefe0e522e66fe670a28a269f13da9ff25d5d 100644 (file)
@@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
                (report->rap.sub_id == 0x41);
 }
 
+/**
+ * hidpp_prefix_name() prefixes the current given name with "Logitech ".
+ */
+static void hidpp_prefix_name(char **name, int name_length)
+{
+#define PREFIX_LENGTH 9 /* "Logitech " */
+
+       int new_length;
+       char *new_name;
+
+       if (name_length > PREFIX_LENGTH &&
+           strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0)
+               /* The prefix has is already in the name */
+               return;
+
+       new_length = PREFIX_LENGTH + name_length;
+       new_name = kzalloc(new_length, GFP_KERNEL);
+       if (!new_name)
+               return;
+
+       snprintf(new_name, new_length, "Logitech %s", *name);
+
+       kfree(*name);
+
+       *name = new_name;
+}
+
 /* -------------------------------------------------------------------------- */
 /* HIDP++ 1.0 commands                                                        */
 /* -------------------------------------------------------------------------- */
@@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
                return NULL;
 
        memcpy(name, &response.rap.params[2], len);
+
+       /* include the terminating '\0' */
+       hidpp_prefix_name(&name, len + 1);
+
        return name;
 }
 
@@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
                index += ret;
        }
 
+       /* include the terminating '\0' */
+       hidpp_prefix_name(&name, __name_length + 1);
+
        return name;
 }
 
@@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
 
        switch (data[0]) {
        case 0x02:
+               if (size < 2) {
+                       hid_err(hdev, "Received HID report of bad size (%d)",
+                               size);
+                       return 1;
+               }
                if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
                        input_event(wd->input, EV_KEY, BTN_LEFT,
                                        !!(data[1] & 0x01));
                        input_event(wd->input, EV_KEY, BTN_RIGHT,
                                        !!(data[1] & 0x02));
                        input_sync(wd->input);
+                       return 0;
                } else {
                        if (size < 21)
                                return 1;
                        return wtp_mouse_raw_xy_event(hidpp, &data[7]);
                }
        case REPORT_ID_HIDPP_LONG:
+               /* size is already checked in hidpp_raw_event. */
                if ((report->fap.feature_index != wd->mt_feature_index) ||
                    (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
                        return 1;
index 1a07e07d99a06c8972a2d80b8fefa8aa4f4b3848..47d7e74231e5a3245461eb5f34a3acecc5bd67d1 100644 (file)
@@ -35,6 +35,8 @@ static struct class *pyra_class;
 static void profile_activated(struct pyra_device *pyra,
                unsigned int new_profile)
 {
+       if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return;
        pyra->actual_profile = new_profile;
        pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
 }
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
        if (off != 0 || count != PYRA_SIZE_SETTINGS)
                return -EINVAL;
 
-       mutex_lock(&pyra->pyra_lock);
-
        settings = (struct pyra_settings const *)buf;
+       if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return -EINVAL;
+
+       mutex_lock(&pyra->pyra_lock);
 
        retval = pyra_set_settings(usb_dev, settings);
        if (retval) {
index d32037cbf9db5e3bf9b1f4d96a3f8c98259a65a5..d43e967e75339ec7972e734e284c4356e31a4e38 100644 (file)
@@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid)
 
 static void i2c_hid_stop(struct hid_device *hid)
 {
-       struct i2c_client *client = hid->driver_data;
-       struct i2c_hid *ihid = i2c_get_clientdata(client);
-
        hid->claimed = 0;
-
-       i2c_hid_free_buffers(ihid);
 }
 
 static int i2c_hid_open(struct hid_device *hid)
index dc89be90b35e80f7d14d5dcaa71dda082c8ab84b..b27b3d33ebab02b9afb7d9fc9dad2e99b31afb14 100644 (file)
@@ -124,6 +124,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
index 6529c09c46f0fe99a02ddfbcd5a643954e1d6e22..a7de26d1ac801383e2ecc57acad47f082c016ce4 100644 (file)
@@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON
          for those channels specified in the map.  This map can be provided
          either via platform data or the device tree bindings.
 
+config SENSORS_I5500
+       tristate "Intel 5500/5520/X58 temperature sensor"
+       depends on X86 && PCI
+       help
+         If you say yes here you get support for the temperature
+         sensor inside the Intel 5500, 5520 and X58 chipsets.
+
+         This driver can also be built as a module. If so, the module
+         will be called i5500_temp.
+
 config SENSORS_CORETEMP
        tristate "Intel Core/Core2/Atom temperature sensor"
        depends on X86
index 67280643bcf009e5b4af58bcf61787f963f3c043..6c941472e707a51b2dfb5f6889aea07bad4ff5b7 100644 (file)
@@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN)        += gpio-fan.o
 obj-$(CONFIG_SENSORS_HIH6130)  += hih6130.o
 obj-$(CONFIG_SENSORS_HTU21)    += htu21.o
 obj-$(CONFIG_SENSORS_ULTRA45)  += ultra45_env.o
+obj-$(CONFIG_SENSORS_I5500)    += i5500_temp.o
 obj-$(CONFIG_SENSORS_I5K_AMB)  += i5k_amb.o
 obj-$(CONFIG_SENSORS_IBMAEM)   += ibmaem.o
 obj-$(CONFIG_SENSORS_IBMPEX)   += ibmpex.o
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
new file mode 100644 (file)
index 0000000..3e3ccbf
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor
+ *
+ * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Register definitions from datasheet */
+#define REG_TSTHRCATA  0xE2
+#define REG_TSCTRL     0xE8
+#define REG_TSTHRRPEX  0xEB
+#define REG_TSTHRLO    0xEC
+#define REG_TSTHRHI    0xEE
+#define REG_CTHINT     0xF0
+#define REG_TSFSC      0xF3
+#define REG_CTSTS      0xF4
+#define REG_TSTHRRQPI  0xF5
+#define REG_CTCTRL     0xF7
+#define REG_TSTIMER    0xF8
+
+/*
+ * Sysfs stuff
+ */
+
+/* Sensor resolution : 0.5 degree C */
+static ssize_t show_temp(struct device *dev,
+                        struct device_attribute *devattr, char *buf)
+{
+       struct pci_dev *pdev = to_pci_dev(dev->parent);
+       long temp;
+       u16 tsthrhi;
+       s8 tsfsc;
+
+       pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi);
+       pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+       temp = ((long)tsthrhi - tsfsc) * 500;
+
+       return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_thresh(struct device *dev,
+                          struct device_attribute *devattr, char *buf)
+{
+       struct pci_dev *pdev = to_pci_dev(dev->parent);
+       int reg = to_sensor_dev_attr(devattr)->index;
+       long temp;
+       u16 tsthr;
+
+       pci_read_config_word(pdev, reg, &tsthr);
+       temp = tsthr * 500;
+
+       return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_alarm(struct device *dev,
+                         struct device_attribute *devattr, char *buf)
+{
+       struct pci_dev *pdev = to_pci_dev(dev->parent);
+       int nr = to_sensor_dev_attr(devattr)->index;
+       u8 ctsts;
+
+       pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+       return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
+
+static struct attribute *i5500_temp_attrs[] = {
+       &dev_attr_temp1_input.attr,
+       &sensor_dev_attr_temp1_crit.dev_attr.attr,
+       &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+       &sensor_dev_attr_temp1_max.dev_attr.attr,
+       &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(i5500_temp);
+
+static const struct pci_device_id i5500_temp_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) },
+       { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, i5500_temp_ids);
+
+static int i5500_temp_probe(struct pci_dev *pdev,
+                           const struct pci_device_id *id)
+{
+       int err;
+       struct device *hwmon_dev;
+       u32 tstimer;
+       s8 tsfsc;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to enable device\n");
+               return err;
+       }
+
+       pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+       pci_read_config_dword(pdev, REG_TSTIMER, &tstimer);
+       if (tsfsc == 0x7F && tstimer == 0x07D30D40) {
+               dev_notice(&pdev->dev, "Sensor seems to be disabled\n");
+               return -ENODEV;
+       }
+
+       hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+                                                          "intel5500", NULL,
+                                                          i5500_temp_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct pci_driver i5500_temp_driver = {
+       .name = "i5500_temp",
+       .id_table = i5500_temp_ids,
+       .probe = i5500_temp_probe,
+};
+
+module_pci_driver(i5500_temp_driver);
+
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
+MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver");
+MODULE_LICENSE("GPL");
index 31e8308ba8990bffbaa6217aafa2bc0e72b62939..ab838d9e28b6389dc6d97dc633ea6259d2126ca3 100644 (file)
@@ -881,6 +881,7 @@ config I2C_XLR
 config I2C_RCAR
        tristate "Renesas R-Car I2C Controller"
        depends on ARCH_SHMOBILE || COMPILE_TEST
+       select I2C_SLAVE
        help
          If you say yes to this option, support will be included for the
          R-Car I2C controller.
index bff20a589621a031b37d1d1a661e4d5ba8e291b7..958c8db4ec30740e2d9aae00a7835256700d3424 100644 (file)
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
        int ret;
 
        pm_runtime_get_sync(&adap->dev);
-       clk_prepare_enable(i2c->clk);
+       ret = clk_enable(i2c->clk);
+       if (ret)
+               return ret;
 
        for (retry = 0; retry < adap->retries; retry++) {
 
                ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
 
                if (ret != -EAGAIN) {
-                       clk_disable_unprepare(i2c->clk);
+                       clk_disable(i2c->clk);
                        pm_runtime_put(&adap->dev);
                        return ret;
                }
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
                udelay(100);
        }
 
-       clk_disable_unprepare(i2c->clk);
+       clk_disable(i2c->clk);
        pm_runtime_put(&adap->dev);
        return -EREMOTEIO;
 }
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 
        clk_prepare_enable(i2c->clk);
        ret = s3c24xx_i2c_init(i2c);
-       clk_disable_unprepare(i2c->clk);
+       clk_disable(i2c->clk);
        if (ret != 0) {
                dev_err(&pdev->dev, "I2C controller init failed\n");
                return ret;
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
                i2c->irq = ret = platform_get_irq(pdev, 0);
                if (ret <= 0) {
                        dev_err(&pdev->dev, "cannot find IRQ\n");
+                       clk_unprepare(i2c->clk);
                        return ret;
                }
 
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 
                if (ret != 0) {
                        dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+                       clk_unprepare(i2c->clk);
                        return ret;
                }
        }
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
        ret = s3c24xx_i2c_register_cpufreq(i2c);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
+               clk_unprepare(i2c->clk);
                return ret;
        }
 
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to add bus to i2c core\n");
                s3c24xx_i2c_deregister_cpufreq(i2c);
+               clk_unprepare(i2c->clk);
                return ret;
        }
 
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
 {
        struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
 
+       clk_unprepare(i2c->clk);
+
        pm_runtime_disable(&i2c->adap.dev);
        pm_runtime_disable(&pdev->dev);
 
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+       int ret;
 
        if (!IS_ERR(i2c->sysreg))
                regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
 
-       clk_prepare_enable(i2c->clk);
+       ret = clk_enable(i2c->clk);
+       if (ret)
+               return ret;
        s3c24xx_i2c_init(i2c);
-       clk_disable_unprepare(i2c->clk);
+       clk_disable(i2c->clk);
        i2c->suspended = 0;
 
        return 0;
index 440d5dbc8b5f0c90ca3dd4341cb4a4e4858f791e..007818b3e1745bd1cb0e41a7581572aa70b2f4ea 100644 (file)
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data {
        int pos;
        int sr;
        bool send_stop;
+       bool stop_after_dma;
 
        struct resource *res;
        struct dma_chan *dma_tx;
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
 
        if (pd->pos == pd->msg->len) {
                /* Send stop if we haven't yet (DMA case) */
-               if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+               if (pd->send_stop && pd->stop_after_dma)
                        i2c_op(pd, OP_TX_STOP, 0);
                return 1;
        }
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
                real_pos = pd->pos - 2;
 
                if (pd->pos == pd->msg->len) {
+                       if (pd->stop_after_dma) {
+                               /* Simulate PIO end condition after DMA transfer */
+                               i2c_op(pd, OP_RX_STOP, 0);
+                               pd->pos++;
+                               break;
+                       }
+
                        if (real_pos < 0) {
                                i2c_op(pd, OP_RX_STOP, 0);
                                break;
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data)
 
        sh_mobile_i2c_dma_unmap(pd);
        pd->pos = pd->msg->len;
+       pd->stop_after_dma = true;
 
        iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
 }
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
                bool do_start = pd->send_stop || !i;
                msg = &msgs[i];
                pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
+               pd->stop_after_dma = false;
 
                err = start_ch(pd, msg, do_start);
                if (err)
index 39d25a8cb1ad355e099b8958afa2b5e8c2c16f75..e9eae57a2b50f77e3d25c4d9fcfa003728464740 100644 (file)
@@ -2972,6 +2972,7 @@ trace:
 }
 EXPORT_SYMBOL(i2c_smbus_xfer);
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
 {
        int ret;
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client)
        return ret;
 }
 EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+#endif
 
 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
 MODULE_DESCRIPTION("I2C-Bus main module");
index 6631400b5f02028f804343099793c780ae4c0ee5..cf9b09db092f4e9969666565ba1562220d6b2a46 100644 (file)
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
        struct eeprom_data *eeprom;
        unsigned long flags;
 
-       if (off + count >= attr->size)
+       if (off + count > attr->size)
                return -EFBIG;
 
        eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
        struct eeprom_data *eeprom;
        unsigned long flags;
 
-       if (off + count >= attr->size)
+       if (off + count > attr->size)
                return -EFBIG;
 
        eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
index e37412da15f5c8ea300c7a096368d564a0acfba0..b99de00e57b86ce8164eb0b3ad02a8de11b8597e 100644 (file)
@@ -143,9 +143,15 @@ static int ad799x_write_config(struct ad799x_state *st, u16 val)
        case ad7998:
                return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG,
                        val);
-       default:
+       case ad7992:
+       case ad7993:
+       case ad7994:
                return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG,
                        val);
+       default:
+               /* Will be written when doing a conversion */
+               st->config = val;
+               return 0;
        }
 }
 
@@ -155,8 +161,13 @@ static int ad799x_read_config(struct ad799x_state *st)
        case ad7997:
        case ad7998:
                return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG);
-       default:
+       case ad7992:
+       case ad7993:
+       case ad7994:
                return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG);
+       default:
+               /* No readback support */
+               return st->config;
        }
 }
 
index 866fe904cba29e9f9f06d26fb0da16a9ce62b4d4..90c8cb727cc700b63f25c451c5eb4758bb3e400b 100644 (file)
@@ -449,6 +449,9 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
        if (val2 == NULL)
                val2 = &unused;
 
+       if(!iio_channel_has_info(chan->channel, info))
+               return -EINVAL;
+
        if (chan->indio_dev->info->read_raw_multi) {
                ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
                                        chan->channel, INDIO_MAX_RAW_ELEMENTS,
index e6c23b9eab336818fa785bae49f5c78c47221fbb..5db1a8cc388da0c5de517bf69b3d8136b94a1bbf 100644 (file)
@@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
                                    struct ib_udata *uhw) = {
        [IB_USER_VERBS_EX_CMD_CREATE_FLOW]      = ib_uverbs_ex_create_flow,
        [IB_USER_VERBS_EX_CMD_DESTROY_FLOW]     = ib_uverbs_ex_destroy_flow,
-       [IB_USER_VERBS_EX_CMD_QUERY_DEVICE]     = ib_uverbs_ex_query_device
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
index 9edc200b311d861a9faa3eb0c051c5c9c7fbf3cc..57176ddd4c50ff677da5c711b8b7d14e9a020d0d 100644 (file)
@@ -235,19 +235,19 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
 
 static void set_emss(struct c4iw_ep *ep, u16 opt)
 {
-       ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+       ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
                   ((AF_INET == ep->com.remote_addr.ss_family) ?
                    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
                   sizeof(struct tcphdr);
        ep->mss = ep->emss;
-       if (GET_TCPOPT_TSTAMP(opt))
+       if (TCPOPT_TSTAMP_G(opt))
                ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
        if (ep->emss < 128)
                ep->emss = 128;
        if (ep->emss & 7)
                PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
-                    GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
-       PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+                    TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+       PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
             ep->mss, ep->emss);
 }
 
@@ -652,29 +652,29 @@ static int send_connect(struct c4iw_ep *ep)
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
 
-       opt0 = (nocong ? NO_CONG(1) : 0) |
+       opt0 = (nocong ? NO_CONG_F : 0) |
               KEEP_ALIVE_F |
-              DELACK(1) |
+              DELACK_F |
               WND_SCALE_V(wscale) |
               MSS_IDX_V(mtu_idx) |
               L2T_IDX_V(ep->l2t->idx) |
               TX_CHAN_V(ep->tx_chan) |
               SMAC_SEL_V(ep->smac_idx) |
-              DSCP(ep->tos) |
+              DSCP_V(ep->tos) |
               ULP_MODE_V(ULP_MODE_TCPDDP) |
               RCV_BUFSIZ_V(win);
        opt2 = RX_CHANNEL_V(0) |
-              CCTRL_ECN(enable_ecn) |
+              CCTRL_ECN_V(enable_ecn) |
               RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
        if (enable_tcp_timestamps)
-               opt2 |= TSTAMPS_EN(1);
+               opt2 |= TSTAMPS_EN_F;
        if (enable_tcp_sack)
-               opt2 |= SACK_EN(1);
+               opt2 |= SACK_EN_F;
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN_F;
        if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
                opt2 |= T5_OPT_2_VALID_F;
-               opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+               opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
                opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
        }
        t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@@ -1042,7 +1042,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
        struct c4iw_ep *ep;
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int tid = GET_TID(req);
-       unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+       unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
        struct tid_info *t = dev->rdev.lldi.tids;
 
        ep = lookup_atid(t, atid);
@@ -1258,8 +1258,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                                    ep->hwtid));
        req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
-                                      F_RX_DACK_CHANGE |
-                                      V_RX_DACK_MODE(dack_mode));
+                                      RX_DACK_CHANGE_F |
+                                      RX_DACK_MODE_V(dack_mode));
        set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
        c4iw_ofld_send(&ep->com.dev->rdev, skb);
        return credits;
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
        memset(req, 0, sizeof(*req));
-       req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+       req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
        req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
        req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
                                     ep->com.dev->rdev.lldi.ports[0],
@@ -1782,27 +1782,27 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
 
-       req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
-               (nocong ? NO_CONG(1) : 0) |
+       req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
+               (nocong ? NO_CONG_F : 0) |
                KEEP_ALIVE_F |
-               DELACK(1) |
+               DELACK_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(mtu_idx) |
                L2T_IDX_V(ep->l2t->idx) |
                TX_CHAN_V(ep->tx_chan) |
                SMAC_SEL_V(ep->smac_idx) |
-               DSCP(ep->tos) |
+               DSCP_V(ep->tos) |
                ULP_MODE_V(ULP_MODE_TCPDDP) |
                RCV_BUFSIZ_V(win));
-       req->tcb.opt2 = (__force __be32) (PACE(1) |
-               TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+       req->tcb.opt2 = (__force __be32) (PACE_V(1) |
+               TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
                RX_CHANNEL_V(0) |
-               CCTRL_ECN(enable_ecn) |
+               CCTRL_ECN_V(enable_ecn) |
                RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
        if (enable_tcp_timestamps)
-               req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
+               req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
        if (enable_tcp_sack)
-               req->tcb.opt2 |= (__force __be32)SACK_EN(1);
+               req->tcb.opt2 |= (__force __be32)SACK_EN_F;
        if (wscale && enable_tcp_window_scaling)
                req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
        req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
@@ -2023,10 +2023,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *ep;
        struct cpl_act_open_rpl *rpl = cplhdr(skb);
-       unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
-                                       ntohl(rpl->atid_status)));
+       unsigned int atid = TID_TID_G(AOPEN_ATID_G(
+                                     ntohl(rpl->atid_status)));
        struct tid_info *t = dev->rdev.lldi.tids;
-       int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+       int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
        struct sockaddr_in *la;
        struct sockaddr_in *ra;
        struct sockaddr_in6 *la6;
@@ -2064,7 +2064,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                if (ep->com.local_addr.ss_family == AF_INET &&
                    dev->rdev.lldi.enable_fw_ofld_conn) {
                        send_fw_act_open_req(ep,
-                                            GET_TID_TID(GET_AOPEN_ATID(
+                                            TID_TID_G(AOPEN_ATID_G(
                                             ntohl(rpl->atid_status))));
                        return 0;
                }
@@ -2181,39 +2181,39 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
        win = ep->rcv_win >> 10;
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
-       opt0 = (nocong ? NO_CONG(1) : 0) |
+       opt0 = (nocong ? NO_CONG_F : 0) |
               KEEP_ALIVE_F |
-              DELACK(1) |
+              DELACK_F |
               WND_SCALE_V(wscale) |
               MSS_IDX_V(mtu_idx) |
               L2T_IDX_V(ep->l2t->idx) |
               TX_CHAN_V(ep->tx_chan) |
               SMAC_SEL_V(ep->smac_idx) |
-              DSCP(ep->tos >> 2) |
+              DSCP_V(ep->tos >> 2) |
               ULP_MODE_V(ULP_MODE_TCPDDP) |
               RCV_BUFSIZ_V(win);
        opt2 = RX_CHANNEL_V(0) |
               RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 
        if (enable_tcp_timestamps && req->tcpopt.tstamp)
-               opt2 |= TSTAMPS_EN(1);
+               opt2 |= TSTAMPS_EN_F;
        if (enable_tcp_sack && req->tcpopt.sack)
-               opt2 |= SACK_EN(1);
+               opt2 |= SACK_EN_F;
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN_F;
        if (enable_ecn) {
                const struct tcphdr *tcph;
                u32 hlen = ntohl(req->hdr_len);
 
-               tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
-                       G_IP_HDR_LEN(hlen);
+               tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
+                       IP_HDR_LEN_G(hlen);
                if (tcph->ece && tcph->cwr)
-                       opt2 |= CCTRL_ECN(1);
+                       opt2 |= CCTRL_ECN_V(1);
        }
        if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
                u32 isn = (prandom_u32() & ~7UL) - 1;
                opt2 |= T5_OPT_2_VALID_F;
-               opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+               opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
                opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
                rpl5 = (void *)rpl;
                memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
@@ -2245,8 +2245,8 @@ static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
                       __u8 *local_ip, __u8 *peer_ip,
                       __be16 *local_port, __be16 *peer_port)
 {
-       int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
-       int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
+       int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+       int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
        struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
        struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
        struct tcphdr *tcp = (struct tcphdr *)
@@ -2277,7 +2277,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *child_ep = NULL, *parent_ep;
        struct cpl_pass_accept_req *req = cplhdr(skb);
-       unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+       unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
        struct dst_entry *dst;
@@ -2310,14 +2310,14 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                     ntohs(peer_port), peer_mss);
                dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
                                 local_port, peer_port,
-                                GET_POPEN_TOS(ntohl(req->tos_stid)));
+                                PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
                     , __func__, parent_ep, hwtid,
                     local_ip, peer_ip, ntohs(local_port),
                     ntohs(peer_port), peer_mss);
                dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
-                                 PASS_OPEN_TOS(ntohl(req->tos_stid)),
+                                 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
                                  ((struct sockaddr_in6 *)
                                  &parent_ep->com.local_addr)->sin6_scope_id);
        }
@@ -2375,7 +2375,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        }
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
-       child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+       child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;
 
@@ -3500,24 +3500,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 
        req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
        memset(req, 0, sizeof(*req));
-       req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
-                        V_SYN_MAC_IDX(G_RX_MACIDX(
+       req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
+                        SYN_MAC_IDX_V(RX_MACIDX_G(
                         (__force int) htonl(l2info))) |
-                        F_SYN_XACT_MATCH);
+                        SYN_XACT_MATCH_F);
        eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-                           G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
-                           G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
-       req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+                           RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
+                           RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
+       req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(
                                        (__force int) htonl(l2info))) |
-                                  V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+                                  TCP_HDR_LEN_V(RX_TCPHDR_LEN_G(
                                        (__force int) htons(hdr_len))) |
-                                  V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+                                  IP_HDR_LEN_V(RX_IPHDR_LEN_G(
                                        (__force int) htons(hdr_len))) |
-                                  V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
+                                  ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len)));
        req->vlan = (__force __be16) vlantag;
        req->len = (__force __be16) len;
-       req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
-                                   PASS_OPEN_TOS(tos));
+       req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
+                                   PASS_OPEN_TOS_V(tos));
        req->tcpopt.mss = htons(tmp_opt.mss_clamp);
        if (tmp_opt.wscale_ok)
                req->tcpopt.wsf = tmp_opt.snd_wscale;
@@ -3542,7 +3542,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
        req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
        req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
        memset(req, 0, sizeof(*req));
-       req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+       req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
        req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
        req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
        req->le.filter = (__force __be32) filter;
@@ -3556,7 +3556,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
                 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
                        FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
                        FW_OFLD_CONNECTION_WR_ASTID_V(
-                       GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+                       PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
 
        /*
         * We store the qid in opt2 which will be used by the firmware
@@ -3613,7 +3613,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        struct neighbour *neigh;
 
        /* Drop all non-SYN packets */
-       if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+       if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
                goto reject;
 
        /*
@@ -3635,8 +3635,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        }
 
        eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-                           G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
-                           G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+                           RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
+                           RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
        if (eth_hdr_len == ETH_HLEN) {
                eh = (struct ethhdr *)(req + 1);
                iph = (struct iphdr *)(eh + 1);
index e9fd3a029296389cc63628319491edf7a2644a15..ab7692ac2044b0a351c10681f08f63759f1934e8 100644 (file)
@@ -52,7 +52,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
-                       V_FW_RI_RES_WR_NRES(1) |
+                       FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
@@ -122,7 +122,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
-                       V_FW_RI_RES_WR_NRES(1) |
+                       FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
@@ -131,17 +131,17 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
-                       V_FW_RI_RES_WR_IQANUS(0) |
-                       V_FW_RI_RES_WR_IQANUD(1) |
-                       F_FW_RI_RES_WR_IQANDST |
-                       V_FW_RI_RES_WR_IQANDSTINDEX(
+                       FW_RI_RES_WR_IQANUS_V(0) |
+                       FW_RI_RES_WR_IQANUD_V(1) |
+                       FW_RI_RES_WR_IQANDST_F |
+                       FW_RI_RES_WR_IQANDSTINDEX_V(
                                rdev->lldi.ciq_ids[cq->vector]));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
-                       F_FW_RI_RES_WR_IQDROPRSS |
-                       V_FW_RI_RES_WR_IQPCIECH(2) |
-                       V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
-                       F_FW_RI_RES_WR_IQO |
-                       V_FW_RI_RES_WR_IQESIZE(1));
+                       FW_RI_RES_WR_IQDROPRSS_F |
+                       FW_RI_RES_WR_IQPCIECH_V(2) |
+                       FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
+                       FW_RI_RES_WR_IQO_F |
+                       FW_RI_RES_WR_IQESIZE_V(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
 
@@ -182,12 +182,12 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
-       cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
-                                V_CQE_OPCODE(FW_RI_SEND) |
-                                V_CQE_TYPE(0) |
-                                V_CQE_SWCQE(1) |
-                                V_CQE_QPID(wq->sq.qid));
-       cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(FW_RI_SEND) |
+                                CQE_TYPE_V(0) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(wq->sq.qid));
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
 }
@@ -215,13 +215,13 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
-       cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
-                                V_CQE_OPCODE(swcqe->opcode) |
-                                V_CQE_TYPE(1) |
-                                V_CQE_SWCQE(1) |
-                                V_CQE_QPID(wq->sq.qid));
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(swcqe->opcode) |
+                                CQE_TYPE_V(1) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
-       cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
 }
@@ -284,7 +284,7 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
                         */
                        PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
                                        __func__, cidx, cq->sw_pidx);
-                       swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+                       swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
@@ -301,10 +301,10 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
 {
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
-       read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
-                       V_CQE_SWCQE(SW_CQE(hw_cqe)) |
-                       V_CQE_OPCODE(FW_RI_READ_REQ) |
-                       V_CQE_TYPE(1));
+       read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
+                       CQE_SWCQE_V(SW_CQE(hw_cqe)) |
+                       CQE_OPCODE_V(FW_RI_READ_REQ) |
+                       CQE_TYPE_V(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
 }
 
@@ -400,7 +400,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
-                       swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+                       swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
                        t4_swcq_produce(&chp->cq);
                }
 next_cqe:
@@ -576,7 +576,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
-                       hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+                       hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
index eb5df4e62703d8adb254a9a4f6163a90d40ebd18..aafdbcd84fc4b93be3234105240a4c5ff7137bc2 100644 (file)
@@ -380,12 +380,12 @@ static int dump_stag(int id, void *p, void *data)
                      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
                      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
                      (u32)id<<8,
-                     G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
-                     G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
-                     G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
-                     G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
-                     G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
-                     G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+                     FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
+                     FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
+                     FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
+                     FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
+                     FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
+                     FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
                      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
                      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
        if (cc < space)
index c9df0549f51dc0921eede052f8cf0753ce83e0df..794555dc86a598a78125edc38a01299157e9caeb 100644 (file)
@@ -50,12 +50,12 @@ static void print_tpte(struct c4iw_dev *dev, u32 stag)
        PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
               "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
               stag & 0xffffff00,
-              G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
-              G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
-              G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
-              G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
-              G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
-              G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+              FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
+              FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
+              FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
+              FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
+              FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
+              FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
               ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
               ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
 }
index cb43c2299ac00b94ec4252a6074013ecf2730955..6791fd16272c46f1d23efa3f585994aa0fd7a554 100644 (file)
@@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
        req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
        req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
        req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
-       req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+       req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
        req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
        req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
        req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
 
        sgl = (struct ulptx_sgl *)(req + 1);
        sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-                                   ULPTX_NSGE(1));
+                                   ULPTX_NSGE_V(1));
        sgl->len0 = cpu_to_be32(len);
        sgl->addr0 = cpu_to_be64(data);
 
@@ -286,17 +286,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
        if (reset_tpt_entry)
                memset(&tpt, 0, sizeof(tpt));
        else {
-               tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
-                       V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
-                       V_FW_RI_TPTE_STAGSTATE(stag_state) |
-                       V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
-               tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
-                       (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
-                       V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
+               tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+                       FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
+                       FW_RI_TPTE_STAGSTATE_V(stag_state) |
+                       FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
+               tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+                       (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
+                       FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
                                                      FW_RI_VA_BASED_TO))|
-                       V_FW_RI_TPTE_PS(page_size));
+                       FW_RI_TPTE_PS_V(page_size));
                tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
-                       V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
+                       FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
                tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
                tpt.va_hi = cpu_to_be32((u32)(to >> 32));
                tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
index bb85d479e66eec254fa5c389e9dc4cc335248bb4..15cae5a3101851ed1a0cb049ff43987651a8955c 100644 (file)
@@ -272,7 +272,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
-                       V_FW_RI_RES_WR_NRES(2) |
+                       FW_RI_RES_WR_NRES_V(2) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
@@ -287,19 +287,19 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                rdev->hw_queue.t4_eq_status_entries;
 
        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
-               V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
-               V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
-               V_FW_RI_RES_WR_PCIECHN(0) |     /* set by uP at ri_init time */
-               (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
-               V_FW_RI_RES_WR_IQID(scq->cqid));
+               FW_RI_RES_WR_HOSTFCMODE_V(0) |  /* no host cidx updates */
+               FW_RI_RES_WR_CPRIO_V(0) |       /* don't keep in chip cache */
+               FW_RI_RES_WR_PCIECHN_V(0) |     /* set by uP at ri_init time */
+               (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
+               FW_RI_RES_WR_IQID_V(scq->cqid));
        res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
-               V_FW_RI_RES_WR_DCAEN(0) |
-               V_FW_RI_RES_WR_DCACPU(0) |
-               V_FW_RI_RES_WR_FBMIN(2) |
-               V_FW_RI_RES_WR_FBMAX(2) |
-               V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
-               V_FW_RI_RES_WR_CIDXFTHRESH(0) |
-               V_FW_RI_RES_WR_EQSIZE(eqsize));
+               FW_RI_RES_WR_DCAEN_V(0) |
+               FW_RI_RES_WR_DCACPU_V(0) |
+               FW_RI_RES_WR_FBMIN_V(2) |
+               FW_RI_RES_WR_FBMAX_V(2) |
+               FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+               FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+               FW_RI_RES_WR_EQSIZE_V(eqsize));
        res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
        res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
        res++;
@@ -312,18 +312,18 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
                rdev->hw_queue.t4_eq_status_entries;
        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
-               V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
-               V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
-               V_FW_RI_RES_WR_PCIECHN(0) |     /* set by uP at ri_init time */
-               V_FW_RI_RES_WR_IQID(rcq->cqid));
+               FW_RI_RES_WR_HOSTFCMODE_V(0) |  /* no host cidx updates */
+               FW_RI_RES_WR_CPRIO_V(0) |       /* don't keep in chip cache */
+               FW_RI_RES_WR_PCIECHN_V(0) |     /* set by uP at ri_init time */
+               FW_RI_RES_WR_IQID_V(rcq->cqid));
        res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
-               V_FW_RI_RES_WR_DCAEN(0) |
-               V_FW_RI_RES_WR_DCACPU(0) |
-               V_FW_RI_RES_WR_FBMIN(2) |
-               V_FW_RI_RES_WR_FBMAX(2) |
-               V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
-               V_FW_RI_RES_WR_CIDXFTHRESH(0) |
-               V_FW_RI_RES_WR_EQSIZE(eqsize));
+               FW_RI_RES_WR_DCAEN_V(0) |
+               FW_RI_RES_WR_DCACPU_V(0) |
+               FW_RI_RES_WR_FBMIN_V(2) |
+               FW_RI_RES_WR_FBMAX_V(2) |
+               FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+               FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+               FW_RI_RES_WR_EQSIZE_V(eqsize));
        res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
        res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
 
@@ -444,19 +444,19 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
        case IB_WR_SEND:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.sendop_pkd = cpu_to_be32(
-                               V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
+                               FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
                else
                        wqe->send.sendop_pkd = cpu_to_be32(
-                               V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
+                               FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
                wqe->send.stag_inv = 0;
                break;
        case IB_WR_SEND_WITH_INV:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.sendop_pkd = cpu_to_be32(
-                               V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
+                               FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
                else
                        wqe->send.sendop_pkd = cpu_to_be32(
-                               V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
+                               FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
                wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
                break;
 
@@ -1283,8 +1283,8 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 
        wqe->u.init.type = FW_RI_TYPE_INIT;
        wqe->u.init.mpareqbit_p2ptype =
-               V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
-               V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
+               FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
+               FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
        wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
        if (qhp->attr.mpa_attr.recv_marker_enabled)
                wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
@@ -1776,7 +1776,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                if (mm5) {
                        mm5->key = uresp.ma_sync_key;
                        mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
-                                   + A_PCIE_MA_SYNC) & PAGE_MASK;
+                                   + PCIE_MA_SYNC_A) & PAGE_MASK;
                        mm5->len = PAGE_SIZE;
                        insert_mmap(ucontext, mm5);
                }
index c04e5134b30cb27055740f572403b25cd4c1dc6a..871cdcac7be26a3479f78eb34c3f799f5a8b840f 100644 (file)
@@ -41,7 +41,7 @@
 #define T4_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
-#define A_PCIE_MA_SYNC 0x30b4
+#define PCIE_MA_SYNC_A 0x30b4
 
 struct t4_status_page {
        __be32 rsvd1;   /* flit 0 - hw owns */
@@ -184,44 +184,44 @@ struct t4_cqe {
 
 /* macros for flit 0 of the cqe */
 
-#define S_CQE_QPID        12
-#define M_CQE_QPID        0xFFFFF
-#define G_CQE_QPID(x)     ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x)    ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE       11
-#define M_CQE_SWCQE       0x1
-#define G_CQE_SWCQE(x)    ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x)   ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_STATUS      5
-#define M_CQE_STATUS      0x1F
-#define G_CQE_STATUS(x)   ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x)   ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE        4
-#define M_CQE_TYPE        0x1
-#define G_CQE_TYPE(x)     ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x)     ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE      0
-#define M_CQE_OPCODE      0xF
-#define G_CQE_OPCODE(x)   ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x)   ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x)         (G_CQE_SWCQE(be32_to_cpu((x)->header)))
-#define CQE_QPID(x)       (G_CQE_QPID(be32_to_cpu((x)->header)))
-#define CQE_TYPE(x)       (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define CQE_QPID_S        12
+#define CQE_QPID_M        0xFFFFF
+#define CQE_QPID_G(x)     ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
+#define CQE_QPID_V(x)    ((x)<<CQE_QPID_S)
+
+#define CQE_SWCQE_S       11
+#define CQE_SWCQE_M       0x1
+#define CQE_SWCQE_G(x)    ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
+#define CQE_SWCQE_V(x)   ((x)<<CQE_SWCQE_S)
+
+#define CQE_STATUS_S      5
+#define CQE_STATUS_M      0x1F
+#define CQE_STATUS_G(x)   ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
+#define CQE_STATUS_V(x)   ((x)<<CQE_STATUS_S)
+
+#define CQE_TYPE_S        4
+#define CQE_TYPE_M        0x1
+#define CQE_TYPE_G(x)     ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
+#define CQE_TYPE_V(x)     ((x)<<CQE_TYPE_S)
+
+#define CQE_OPCODE_S      0
+#define CQE_OPCODE_M      0xF
+#define CQE_OPCODE_G(x)   ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
+#define CQE_OPCODE_V(x)   ((x)<<CQE_OPCODE_S)
+
+#define SW_CQE(x)         (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define CQE_QPID(x)       (CQE_QPID_G(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x)       (CQE_TYPE_G(be32_to_cpu((x)->header)))
 #define SQ_TYPE(x)       (CQE_TYPE((x)))
 #define RQ_TYPE(x)       (!CQE_TYPE((x)))
-#define CQE_STATUS(x)     (G_CQE_STATUS(be32_to_cpu((x)->header)))
-#define CQE_OPCODE(x)     (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+#define CQE_STATUS(x)     (CQE_STATUS_G(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x)     (CQE_OPCODE_G(be32_to_cpu((x)->header)))
 
 #define CQE_SEND_OPCODE(x)( \
-       (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
-       (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
-       (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
-       (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+       (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+       (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+       (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+       (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
 
 #define CQE_LEN(x)        (be32_to_cpu((x)->len))
 
@@ -237,25 +237,25 @@ struct t4_cqe {
 #define CQE_WRID_LOW(x)                (be32_to_cpu((x)->u.gen.wrid_low))
 
 /* macros for flit 3 of the cqe */
-#define S_CQE_GENBIT   63
-#define M_CQE_GENBIT   0x1
-#define G_CQE_GENBIT(x)        (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
+#define CQE_GENBIT_S   63
+#define CQE_GENBIT_M   0x1
+#define CQE_GENBIT_G(x)        (((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
+#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S)
 
-#define S_CQE_OVFBIT   62
-#define M_CQE_OVFBIT   0x1
-#define G_CQE_OVFBIT(x)        ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+#define CQE_OVFBIT_S   62
+#define CQE_OVFBIT_M   0x1
+#define CQE_OVFBIT_G(x)        ((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)
 
-#define S_CQE_IQTYPE   60
-#define M_CQE_IQTYPE   0x3
-#define G_CQE_IQTYPE(x)        ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+#define CQE_IQTYPE_S   60
+#define CQE_IQTYPE_M   0x3
+#define CQE_IQTYPE_G(x)        ((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)
 
-#define M_CQE_TS       0x0fffffffffffffffULL
-#define G_CQE_TS(x)    ((x) & M_CQE_TS)
+#define CQE_TS_M       0x0fffffffffffffffULL
+#define CQE_TS_G(x)    ((x) & CQE_TS_M)
 
-#define CQE_OVFBIT(x)  ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_GENBIT(x)  ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_TS(x)      (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_OVFBIT(x)  ((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x)  ((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x)      (CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
 
 struct t4_swsqe {
        u64                     wr_id;
@@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
                } else {
                        PDBG("%s: DB wq->sq.pidx = %d\n",
                             __func__, wq->sq.pidx);
-                       writel(PIDX_T5(inc), wq->sq.udb);
+                       writel(PIDX_T5_V(inc), wq->sq.udb);
                }
 
                /* Flush user doorbell area writes. */
                wmb();
                return;
        }
-       writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+       writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
 }
 
 static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
@@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
                } else {
                        PDBG("%s: DB wq->rq.pidx = %d\n",
                             __func__, wq->rq.pidx);
-                       writel(PIDX_T5(inc), wq->rq.udb);
+                       writel(PIDX_T5_V(inc), wq->rq.udb);
                }
 
                /* Flush user doorbell area writes. */
                wmb();
                return;
        }
-       writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+       writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
 }
 
 static inline int t4_wq_in_error(struct t4_wq *wq)
@@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
        u32 val;
 
        set_bit(CQ_ARMED, &cq->flags);
-       while (cq->cidx_inc > CIDXINC_MASK) {
-               val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
-                     INGRESSQID(cq->cqid);
+       while (cq->cidx_inc > CIDXINC_M) {
+               val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
+                     INGRESSQID_V(cq->cqid);
                writel(val, cq->gts);
-               cq->cidx_inc -= CIDXINC_MASK;
+               cq->cidx_inc -= CIDXINC_M;
        }
-       val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
-             INGRESSQID(cq->cqid);
+       val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
+             INGRESSQID_V(cq->cqid);
        writel(val, cq->gts);
        cq->cidx_inc = 0;
        return 0;
@@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
        cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-       if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
+       if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
                u32 val;
 
-               val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
-                     INGRESSQID(cq->cqid);
+               val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
+                     INGRESSQID_V(cq->cqid);
                writel(val, cq->gts);
                cq->cidx_inc = 0;
        }
index 5709e77faf7cb9d94a5d4e60f7f9af3d8b6efddb..5e53327fc6476b678227609bee4aee90bbb0c7c0 100644 (file)
@@ -162,102 +162,102 @@ struct fw_ri_tpte {
        __be32 len_hi;
 };
 
-#define S_FW_RI_TPTE_VALID             31
-#define M_FW_RI_TPTE_VALID             0x1
-#define V_FW_RI_TPTE_VALID(x)          ((x) << S_FW_RI_TPTE_VALID)
-#define G_FW_RI_TPTE_VALID(x)          \
-    (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
-#define F_FW_RI_TPTE_VALID             V_FW_RI_TPTE_VALID(1U)
-
-#define S_FW_RI_TPTE_STAGKEY           23
-#define M_FW_RI_TPTE_STAGKEY           0xff
-#define V_FW_RI_TPTE_STAGKEY(x)                ((x) << S_FW_RI_TPTE_STAGKEY)
-#define G_FW_RI_TPTE_STAGKEY(x)                \
-    (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
-
-#define S_FW_RI_TPTE_STAGSTATE         22
-#define M_FW_RI_TPTE_STAGSTATE         0x1
-#define V_FW_RI_TPTE_STAGSTATE(x)      ((x) << S_FW_RI_TPTE_STAGSTATE)
-#define G_FW_RI_TPTE_STAGSTATE(x)      \
-    (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
-#define F_FW_RI_TPTE_STAGSTATE         V_FW_RI_TPTE_STAGSTATE(1U)
-
-#define S_FW_RI_TPTE_STAGTYPE          20
-#define M_FW_RI_TPTE_STAGTYPE          0x3
-#define V_FW_RI_TPTE_STAGTYPE(x)       ((x) << S_FW_RI_TPTE_STAGTYPE)
-#define G_FW_RI_TPTE_STAGTYPE(x)       \
-    (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
-
-#define S_FW_RI_TPTE_PDID              0
-#define M_FW_RI_TPTE_PDID              0xfffff
-#define V_FW_RI_TPTE_PDID(x)           ((x) << S_FW_RI_TPTE_PDID)
-#define G_FW_RI_TPTE_PDID(x)           \
-    (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
-
-#define S_FW_RI_TPTE_PERM              28
-#define M_FW_RI_TPTE_PERM              0xf
-#define V_FW_RI_TPTE_PERM(x)           ((x) << S_FW_RI_TPTE_PERM)
-#define G_FW_RI_TPTE_PERM(x)           \
-    (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
-
-#define S_FW_RI_TPTE_REMINVDIS         27
-#define M_FW_RI_TPTE_REMINVDIS         0x1
-#define V_FW_RI_TPTE_REMINVDIS(x)      ((x) << S_FW_RI_TPTE_REMINVDIS)
-#define G_FW_RI_TPTE_REMINVDIS(x)      \
-    (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
-#define F_FW_RI_TPTE_REMINVDIS         V_FW_RI_TPTE_REMINVDIS(1U)
-
-#define S_FW_RI_TPTE_ADDRTYPE          26
-#define M_FW_RI_TPTE_ADDRTYPE          1
-#define V_FW_RI_TPTE_ADDRTYPE(x)       ((x) << S_FW_RI_TPTE_ADDRTYPE)
-#define G_FW_RI_TPTE_ADDRTYPE(x)       \
-    (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
-#define F_FW_RI_TPTE_ADDRTYPE          V_FW_RI_TPTE_ADDRTYPE(1U)
-
-#define S_FW_RI_TPTE_MWBINDEN          25
-#define M_FW_RI_TPTE_MWBINDEN          0x1
-#define V_FW_RI_TPTE_MWBINDEN(x)       ((x) << S_FW_RI_TPTE_MWBINDEN)
-#define G_FW_RI_TPTE_MWBINDEN(x)       \
-    (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
-#define F_FW_RI_TPTE_MWBINDEN          V_FW_RI_TPTE_MWBINDEN(1U)
-
-#define S_FW_RI_TPTE_PS                        20
-#define M_FW_RI_TPTE_PS                        0x1f
-#define V_FW_RI_TPTE_PS(x)             ((x) << S_FW_RI_TPTE_PS)
-#define G_FW_RI_TPTE_PS(x)             \
-    (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
-
-#define S_FW_RI_TPTE_QPID              0
-#define M_FW_RI_TPTE_QPID              0xfffff
-#define V_FW_RI_TPTE_QPID(x)           ((x) << S_FW_RI_TPTE_QPID)
-#define G_FW_RI_TPTE_QPID(x)           \
-    (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
-
-#define S_FW_RI_TPTE_NOSNOOP           30
-#define M_FW_RI_TPTE_NOSNOOP           0x1
-#define V_FW_RI_TPTE_NOSNOOP(x)                ((x) << S_FW_RI_TPTE_NOSNOOP)
-#define G_FW_RI_TPTE_NOSNOOP(x)                \
-    (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
-#define F_FW_RI_TPTE_NOSNOOP           V_FW_RI_TPTE_NOSNOOP(1U)
-
-#define S_FW_RI_TPTE_PBLADDR           0
-#define M_FW_RI_TPTE_PBLADDR           0x1fffffff
-#define V_FW_RI_TPTE_PBLADDR(x)                ((x) << S_FW_RI_TPTE_PBLADDR)
-#define G_FW_RI_TPTE_PBLADDR(x)                \
-    (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
-
-#define S_FW_RI_TPTE_DCA               24
-#define M_FW_RI_TPTE_DCA               0x1f
-#define V_FW_RI_TPTE_DCA(x)            ((x) << S_FW_RI_TPTE_DCA)
-#define G_FW_RI_TPTE_DCA(x)            \
-    (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
-
-#define S_FW_RI_TPTE_MWBCNT_PSTAG      0
-#define M_FW_RI_TPTE_MWBCNT_PSTAG      0xffffff
-#define V_FW_RI_TPTE_MWBCNT_PSTAT(x)   \
-    ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
-#define G_FW_RI_TPTE_MWBCNT_PSTAG(x)   \
-    (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
+#define FW_RI_TPTE_VALID_S             31
+#define FW_RI_TPTE_VALID_M             0x1
+#define FW_RI_TPTE_VALID_V(x)          ((x) << FW_RI_TPTE_VALID_S)
+#define FW_RI_TPTE_VALID_G(x)          \
+       (((x) >> FW_RI_TPTE_VALID_S) & FW_RI_TPTE_VALID_M)
+#define FW_RI_TPTE_VALID_F             FW_RI_TPTE_VALID_V(1U)
+
+#define FW_RI_TPTE_STAGKEY_S           23
+#define FW_RI_TPTE_STAGKEY_M           0xff
+#define FW_RI_TPTE_STAGKEY_V(x)                ((x) << FW_RI_TPTE_STAGKEY_S)
+#define FW_RI_TPTE_STAGKEY_G(x)                \
+       (((x) >> FW_RI_TPTE_STAGKEY_S) & FW_RI_TPTE_STAGKEY_M)
+
+#define FW_RI_TPTE_STAGSTATE_S         22
+#define FW_RI_TPTE_STAGSTATE_M         0x1
+#define FW_RI_TPTE_STAGSTATE_V(x)      ((x) << FW_RI_TPTE_STAGSTATE_S)
+#define FW_RI_TPTE_STAGSTATE_G(x)      \
+       (((x) >> FW_RI_TPTE_STAGSTATE_S) & FW_RI_TPTE_STAGSTATE_M)
+#define FW_RI_TPTE_STAGSTATE_F         FW_RI_TPTE_STAGSTATE_V(1U)
+
+#define FW_RI_TPTE_STAGTYPE_S          20
+#define FW_RI_TPTE_STAGTYPE_M          0x3
+#define FW_RI_TPTE_STAGTYPE_V(x)       ((x) << FW_RI_TPTE_STAGTYPE_S)
+#define FW_RI_TPTE_STAGTYPE_G(x)       \
+       (((x) >> FW_RI_TPTE_STAGTYPE_S) & FW_RI_TPTE_STAGTYPE_M)
+
+#define FW_RI_TPTE_PDID_S              0
+#define FW_RI_TPTE_PDID_M              0xfffff
+#define FW_RI_TPTE_PDID_V(x)           ((x) << FW_RI_TPTE_PDID_S)
+#define FW_RI_TPTE_PDID_G(x)           \
+       (((x) >> FW_RI_TPTE_PDID_S) & FW_RI_TPTE_PDID_M)
+
+#define FW_RI_TPTE_PERM_S              28
+#define FW_RI_TPTE_PERM_M              0xf
+#define FW_RI_TPTE_PERM_V(x)           ((x) << FW_RI_TPTE_PERM_S)
+#define FW_RI_TPTE_PERM_G(x)           \
+       (((x) >> FW_RI_TPTE_PERM_S) & FW_RI_TPTE_PERM_M)
+
+#define FW_RI_TPTE_REMINVDIS_S         27
+#define FW_RI_TPTE_REMINVDIS_M         0x1
+#define FW_RI_TPTE_REMINVDIS_V(x)      ((x) << FW_RI_TPTE_REMINVDIS_S)
+#define FW_RI_TPTE_REMINVDIS_G(x)      \
+       (((x) >> FW_RI_TPTE_REMINVDIS_S) & FW_RI_TPTE_REMINVDIS_M)
+#define FW_RI_TPTE_REMINVDIS_F         FW_RI_TPTE_REMINVDIS_V(1U)
+
+#define FW_RI_TPTE_ADDRTYPE_S          26
+#define FW_RI_TPTE_ADDRTYPE_M          1
+#define FW_RI_TPTE_ADDRTYPE_V(x)       ((x) << FW_RI_TPTE_ADDRTYPE_S)
+#define FW_RI_TPTE_ADDRTYPE_G(x)       \
+       (((x) >> FW_RI_TPTE_ADDRTYPE_S) & FW_RI_TPTE_ADDRTYPE_M)
+#define FW_RI_TPTE_ADDRTYPE_F          FW_RI_TPTE_ADDRTYPE_V(1U)
+
+#define FW_RI_TPTE_MWBINDEN_S          25
+#define FW_RI_TPTE_MWBINDEN_M          0x1
+#define FW_RI_TPTE_MWBINDEN_V(x)       ((x) << FW_RI_TPTE_MWBINDEN_S)
+#define FW_RI_TPTE_MWBINDEN_G(x)       \
+       (((x) >> FW_RI_TPTE_MWBINDEN_S) & FW_RI_TPTE_MWBINDEN_M)
+#define FW_RI_TPTE_MWBINDEN_F          FW_RI_TPTE_MWBINDEN_V(1U)
+
+#define FW_RI_TPTE_PS_S                        20
+#define FW_RI_TPTE_PS_M                        0x1f
+#define FW_RI_TPTE_PS_V(x)             ((x) << FW_RI_TPTE_PS_S)
+#define FW_RI_TPTE_PS_G(x)             \
+       (((x) >> FW_RI_TPTE_PS_S) & FW_RI_TPTE_PS_M)
+
+#define FW_RI_TPTE_QPID_S              0
+#define FW_RI_TPTE_QPID_M              0xfffff
+#define FW_RI_TPTE_QPID_V(x)           ((x) << FW_RI_TPTE_QPID_S)
+#define FW_RI_TPTE_QPID_G(x)           \
+       (((x) >> FW_RI_TPTE_QPID_S) & FW_RI_TPTE_QPID_M)
+
+#define FW_RI_TPTE_NOSNOOP_S           30
+#define FW_RI_TPTE_NOSNOOP_M           0x1
+#define FW_RI_TPTE_NOSNOOP_V(x)                ((x) << FW_RI_TPTE_NOSNOOP_S)
+#define FW_RI_TPTE_NOSNOOP_G(x)                \
+       (((x) >> FW_RI_TPTE_NOSNOOP_S) & FW_RI_TPTE_NOSNOOP_M)
+#define FW_RI_TPTE_NOSNOOP_F           FW_RI_TPTE_NOSNOOP_V(1U)
+
+#define FW_RI_TPTE_PBLADDR_S           0
+#define FW_RI_TPTE_PBLADDR_M           0x1fffffff
+#define FW_RI_TPTE_PBLADDR_V(x)                ((x) << FW_RI_TPTE_PBLADDR_S)
+#define FW_RI_TPTE_PBLADDR_G(x)                \
+       (((x) >> FW_RI_TPTE_PBLADDR_S) & FW_RI_TPTE_PBLADDR_M)
+
+#define FW_RI_TPTE_DCA_S               24
+#define FW_RI_TPTE_DCA_M               0x1f
+#define FW_RI_TPTE_DCA_V(x)            ((x) << FW_RI_TPTE_DCA_S)
+#define FW_RI_TPTE_DCA_G(x)            \
+       (((x) >> FW_RI_TPTE_DCA_S) & FW_RI_TPTE_DCA_M)
+
+#define FW_RI_TPTE_MWBCNT_PSTAG_S      0
+#define FW_RI_TPTE_MWBCNT_PSTAG_M      0xffffff
+#define FW_RI_TPTE_MWBCNT_PSTAT_V(x)   \
+       ((x) << FW_RI_TPTE_MWBCNT_PSTAG_S)
+#define FW_RI_TPTE_MWBCNT_PSTAG_G(x)   \
+       (((x) >> FW_RI_TPTE_MWBCNT_PSTAG_S) & FW_RI_TPTE_MWBCNT_PSTAG_M)
 
 enum fw_ri_res_type {
        FW_RI_RES_TYPE_SQ,
@@ -308,222 +308,222 @@ struct fw_ri_res_wr {
 #endif
 };
 
-#define S_FW_RI_RES_WR_NRES    0
-#define M_FW_RI_RES_WR_NRES    0xff
-#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
-#define G_FW_RI_RES_WR_NRES(x) \
-    (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
-
-#define S_FW_RI_RES_WR_FETCHSZM                26
-#define M_FW_RI_RES_WR_FETCHSZM                0x1
-#define V_FW_RI_RES_WR_FETCHSZM(x)     ((x) << S_FW_RI_RES_WR_FETCHSZM)
-#define G_FW_RI_RES_WR_FETCHSZM(x)     \
-    (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
-#define F_FW_RI_RES_WR_FETCHSZM        V_FW_RI_RES_WR_FETCHSZM(1U)
-
-#define S_FW_RI_RES_WR_STATUSPGNS      25
-#define M_FW_RI_RES_WR_STATUSPGNS      0x1
-#define V_FW_RI_RES_WR_STATUSPGNS(x)   ((x) << S_FW_RI_RES_WR_STATUSPGNS)
-#define G_FW_RI_RES_WR_STATUSPGNS(x)   \
-    (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
-#define F_FW_RI_RES_WR_STATUSPGNS      V_FW_RI_RES_WR_STATUSPGNS(1U)
-
-#define S_FW_RI_RES_WR_STATUSPGRO      24
-#define M_FW_RI_RES_WR_STATUSPGRO      0x1
-#define V_FW_RI_RES_WR_STATUSPGRO(x)   ((x) << S_FW_RI_RES_WR_STATUSPGRO)
-#define G_FW_RI_RES_WR_STATUSPGRO(x)   \
-    (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
-#define F_FW_RI_RES_WR_STATUSPGRO      V_FW_RI_RES_WR_STATUSPGRO(1U)
-
-#define S_FW_RI_RES_WR_FETCHNS         23
-#define M_FW_RI_RES_WR_FETCHNS         0x1
-#define V_FW_RI_RES_WR_FETCHNS(x)      ((x) << S_FW_RI_RES_WR_FETCHNS)
-#define G_FW_RI_RES_WR_FETCHNS(x)      \
-    (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
-#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
-
-#define S_FW_RI_RES_WR_FETCHRO         22
-#define M_FW_RI_RES_WR_FETCHRO         0x1
-#define V_FW_RI_RES_WR_FETCHRO(x)      ((x) << S_FW_RI_RES_WR_FETCHRO)
-#define G_FW_RI_RES_WR_FETCHRO(x)      \
-    (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
-#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
-
-#define S_FW_RI_RES_WR_HOSTFCMODE      20
-#define M_FW_RI_RES_WR_HOSTFCMODE      0x3
-#define V_FW_RI_RES_WR_HOSTFCMODE(x)   ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
-#define G_FW_RI_RES_WR_HOSTFCMODE(x)   \
-    (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
-
-#define S_FW_RI_RES_WR_CPRIO   19
-#define M_FW_RI_RES_WR_CPRIO   0x1
-#define V_FW_RI_RES_WR_CPRIO(x)        ((x) << S_FW_RI_RES_WR_CPRIO)
-#define G_FW_RI_RES_WR_CPRIO(x)        \
-    (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
-#define F_FW_RI_RES_WR_CPRIO   V_FW_RI_RES_WR_CPRIO(1U)
-
-#define S_FW_RI_RES_WR_ONCHIP          18
-#define M_FW_RI_RES_WR_ONCHIP          0x1
-#define V_FW_RI_RES_WR_ONCHIP(x)       ((x) << S_FW_RI_RES_WR_ONCHIP)
-#define G_FW_RI_RES_WR_ONCHIP(x)       \
-    (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
-#define F_FW_RI_RES_WR_ONCHIP  V_FW_RI_RES_WR_ONCHIP(1U)
-
-#define S_FW_RI_RES_WR_PCIECHN         16
-#define M_FW_RI_RES_WR_PCIECHN         0x3
-#define V_FW_RI_RES_WR_PCIECHN(x)      ((x) << S_FW_RI_RES_WR_PCIECHN)
-#define G_FW_RI_RES_WR_PCIECHN(x)      \
-    (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
-
-#define S_FW_RI_RES_WR_IQID    0
-#define M_FW_RI_RES_WR_IQID    0xffff
-#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
-#define G_FW_RI_RES_WR_IQID(x) \
-    (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
-
-#define S_FW_RI_RES_WR_DCAEN   31
-#define M_FW_RI_RES_WR_DCAEN   0x1
-#define V_FW_RI_RES_WR_DCAEN(x)        ((x) << S_FW_RI_RES_WR_DCAEN)
-#define G_FW_RI_RES_WR_DCAEN(x)        \
-    (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
-#define F_FW_RI_RES_WR_DCAEN   V_FW_RI_RES_WR_DCAEN(1U)
-
-#define S_FW_RI_RES_WR_DCACPU          26
-#define M_FW_RI_RES_WR_DCACPU          0x1f
-#define V_FW_RI_RES_WR_DCACPU(x)       ((x) << S_FW_RI_RES_WR_DCACPU)
-#define G_FW_RI_RES_WR_DCACPU(x)       \
-    (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
-
-#define S_FW_RI_RES_WR_FBMIN   23
-#define M_FW_RI_RES_WR_FBMIN   0x7
-#define V_FW_RI_RES_WR_FBMIN(x)        ((x) << S_FW_RI_RES_WR_FBMIN)
-#define G_FW_RI_RES_WR_FBMIN(x)        \
-    (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
-
-#define S_FW_RI_RES_WR_FBMAX   20
-#define M_FW_RI_RES_WR_FBMAX   0x7
-#define V_FW_RI_RES_WR_FBMAX(x)        ((x) << S_FW_RI_RES_WR_FBMAX)
-#define G_FW_RI_RES_WR_FBMAX(x)        \
-    (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
-
-#define S_FW_RI_RES_WR_CIDXFTHRESHO    19
-#define M_FW_RI_RES_WR_CIDXFTHRESHO    0x1
-#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
-#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
-    (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
-#define F_FW_RI_RES_WR_CIDXFTHRESHO    V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
-
-#define S_FW_RI_RES_WR_CIDXFTHRESH     16
-#define M_FW_RI_RES_WR_CIDXFTHRESH     0x7
-#define V_FW_RI_RES_WR_CIDXFTHRESH(x)  ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
-#define G_FW_RI_RES_WR_CIDXFTHRESH(x)  \
-    (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
-
-#define S_FW_RI_RES_WR_EQSIZE          0
-#define M_FW_RI_RES_WR_EQSIZE          0xffff
-#define V_FW_RI_RES_WR_EQSIZE(x)       ((x) << S_FW_RI_RES_WR_EQSIZE)
-#define G_FW_RI_RES_WR_EQSIZE(x)       \
-    (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
-
-#define S_FW_RI_RES_WR_IQANDST         15
-#define M_FW_RI_RES_WR_IQANDST         0x1
-#define V_FW_RI_RES_WR_IQANDST(x)      ((x) << S_FW_RI_RES_WR_IQANDST)
-#define G_FW_RI_RES_WR_IQANDST(x)      \
-    (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
-#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
-
-#define S_FW_RI_RES_WR_IQANUS          14
-#define M_FW_RI_RES_WR_IQANUS          0x1
-#define V_FW_RI_RES_WR_IQANUS(x)       ((x) << S_FW_RI_RES_WR_IQANUS)
-#define G_FW_RI_RES_WR_IQANUS(x)       \
-    (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
-#define F_FW_RI_RES_WR_IQANUS  V_FW_RI_RES_WR_IQANUS(1U)
-
-#define S_FW_RI_RES_WR_IQANUD          12
-#define M_FW_RI_RES_WR_IQANUD          0x3
-#define V_FW_RI_RES_WR_IQANUD(x)       ((x) << S_FW_RI_RES_WR_IQANUD)
-#define G_FW_RI_RES_WR_IQANUD(x)       \
-    (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
-
-#define S_FW_RI_RES_WR_IQANDSTINDEX    0
-#define M_FW_RI_RES_WR_IQANDSTINDEX    0xfff
-#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
-#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
-    (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
-
-#define S_FW_RI_RES_WR_IQDROPRSS       15
-#define M_FW_RI_RES_WR_IQDROPRSS       0x1
-#define V_FW_RI_RES_WR_IQDROPRSS(x)    ((x) << S_FW_RI_RES_WR_IQDROPRSS)
-#define G_FW_RI_RES_WR_IQDROPRSS(x)    \
-    (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
-#define F_FW_RI_RES_WR_IQDROPRSS       V_FW_RI_RES_WR_IQDROPRSS(1U)
-
-#define S_FW_RI_RES_WR_IQGTSMODE       14
-#define M_FW_RI_RES_WR_IQGTSMODE       0x1
-#define V_FW_RI_RES_WR_IQGTSMODE(x)    ((x) << S_FW_RI_RES_WR_IQGTSMODE)
-#define G_FW_RI_RES_WR_IQGTSMODE(x)    \
-    (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
-#define F_FW_RI_RES_WR_IQGTSMODE       V_FW_RI_RES_WR_IQGTSMODE(1U)
-
-#define S_FW_RI_RES_WR_IQPCIECH                12
-#define M_FW_RI_RES_WR_IQPCIECH                0x3
-#define V_FW_RI_RES_WR_IQPCIECH(x)     ((x) << S_FW_RI_RES_WR_IQPCIECH)
-#define G_FW_RI_RES_WR_IQPCIECH(x)     \
-    (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
-
-#define S_FW_RI_RES_WR_IQDCAEN         11
-#define M_FW_RI_RES_WR_IQDCAEN         0x1
-#define V_FW_RI_RES_WR_IQDCAEN(x)      ((x) << S_FW_RI_RES_WR_IQDCAEN)
-#define G_FW_RI_RES_WR_IQDCAEN(x)      \
-    (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
-#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
-
-#define S_FW_RI_RES_WR_IQDCACPU                6
-#define M_FW_RI_RES_WR_IQDCACPU                0x1f
-#define V_FW_RI_RES_WR_IQDCACPU(x)     ((x) << S_FW_RI_RES_WR_IQDCACPU)
-#define G_FW_RI_RES_WR_IQDCACPU(x)     \
-    (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
-
-#define S_FW_RI_RES_WR_IQINTCNTTHRESH          4
-#define M_FW_RI_RES_WR_IQINTCNTTHRESH          0x3
-#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x)       \
-    ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
-#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x)       \
-    (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
-
-#define S_FW_RI_RES_WR_IQO     3
-#define M_FW_RI_RES_WR_IQO     0x1
-#define V_FW_RI_RES_WR_IQO(x)  ((x) << S_FW_RI_RES_WR_IQO)
-#define G_FW_RI_RES_WR_IQO(x)  \
-    (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
-#define F_FW_RI_RES_WR_IQO     V_FW_RI_RES_WR_IQO(1U)
-
-#define S_FW_RI_RES_WR_IQCPRIO         2
-#define M_FW_RI_RES_WR_IQCPRIO         0x1
-#define V_FW_RI_RES_WR_IQCPRIO(x)      ((x) << S_FW_RI_RES_WR_IQCPRIO)
-#define G_FW_RI_RES_WR_IQCPRIO(x)      \
-    (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
-#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
-
-#define S_FW_RI_RES_WR_IQESIZE         0
-#define M_FW_RI_RES_WR_IQESIZE         0x3
-#define V_FW_RI_RES_WR_IQESIZE(x)      ((x) << S_FW_RI_RES_WR_IQESIZE)
-#define G_FW_RI_RES_WR_IQESIZE(x)      \
-    (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
-
-#define S_FW_RI_RES_WR_IQNS    31
-#define M_FW_RI_RES_WR_IQNS    0x1
-#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
-#define G_FW_RI_RES_WR_IQNS(x) \
-    (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
-#define F_FW_RI_RES_WR_IQNS    V_FW_RI_RES_WR_IQNS(1U)
-
-#define S_FW_RI_RES_WR_IQRO    30
-#define M_FW_RI_RES_WR_IQRO    0x1
-#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
-#define G_FW_RI_RES_WR_IQRO(x) \
-    (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
-#define F_FW_RI_RES_WR_IQRO    V_FW_RI_RES_WR_IQRO(1U)
+#define FW_RI_RES_WR_NRES_S    0
+#define FW_RI_RES_WR_NRES_M    0xff
+#define FW_RI_RES_WR_NRES_V(x) ((x) << FW_RI_RES_WR_NRES_S)
+#define FW_RI_RES_WR_NRES_G(x) \
+       (((x) >> FW_RI_RES_WR_NRES_S) & FW_RI_RES_WR_NRES_M)
+
+#define FW_RI_RES_WR_FETCHSZM_S                26
+#define FW_RI_RES_WR_FETCHSZM_M                0x1
+#define FW_RI_RES_WR_FETCHSZM_V(x)     ((x) << FW_RI_RES_WR_FETCHSZM_S)
+#define FW_RI_RES_WR_FETCHSZM_G(x)     \
+       (((x) >> FW_RI_RES_WR_FETCHSZM_S) & FW_RI_RES_WR_FETCHSZM_M)
+#define FW_RI_RES_WR_FETCHSZM_F        FW_RI_RES_WR_FETCHSZM_V(1U)
+
+#define FW_RI_RES_WR_STATUSPGNS_S      25
+#define FW_RI_RES_WR_STATUSPGNS_M      0x1
+#define FW_RI_RES_WR_STATUSPGNS_V(x)   ((x) << FW_RI_RES_WR_STATUSPGNS_S)
+#define FW_RI_RES_WR_STATUSPGNS_G(x)   \
+       (((x) >> FW_RI_RES_WR_STATUSPGNS_S) & FW_RI_RES_WR_STATUSPGNS_M)
+#define FW_RI_RES_WR_STATUSPGNS_F      FW_RI_RES_WR_STATUSPGNS_V(1U)
+
+#define FW_RI_RES_WR_STATUSPGRO_S      24
+#define FW_RI_RES_WR_STATUSPGRO_M      0x1
+#define FW_RI_RES_WR_STATUSPGRO_V(x)   ((x) << FW_RI_RES_WR_STATUSPGRO_S)
+#define FW_RI_RES_WR_STATUSPGRO_G(x)   \
+       (((x) >> FW_RI_RES_WR_STATUSPGRO_S) & FW_RI_RES_WR_STATUSPGRO_M)
+#define FW_RI_RES_WR_STATUSPGRO_F      FW_RI_RES_WR_STATUSPGRO_V(1U)
+
+#define FW_RI_RES_WR_FETCHNS_S         23
+#define FW_RI_RES_WR_FETCHNS_M         0x1
+#define FW_RI_RES_WR_FETCHNS_V(x)      ((x) << FW_RI_RES_WR_FETCHNS_S)
+#define FW_RI_RES_WR_FETCHNS_G(x)      \
+       (((x) >> FW_RI_RES_WR_FETCHNS_S) & FW_RI_RES_WR_FETCHNS_M)
+#define FW_RI_RES_WR_FETCHNS_F FW_RI_RES_WR_FETCHNS_V(1U)
+
+#define FW_RI_RES_WR_FETCHRO_S         22
+#define FW_RI_RES_WR_FETCHRO_M         0x1
+#define FW_RI_RES_WR_FETCHRO_V(x)      ((x) << FW_RI_RES_WR_FETCHRO_S)
+#define FW_RI_RES_WR_FETCHRO_G(x)      \
+       (((x) >> FW_RI_RES_WR_FETCHRO_S) & FW_RI_RES_WR_FETCHRO_M)
+#define FW_RI_RES_WR_FETCHRO_F FW_RI_RES_WR_FETCHRO_V(1U)
+
+#define FW_RI_RES_WR_HOSTFCMODE_S      20
+#define FW_RI_RES_WR_HOSTFCMODE_M      0x3
+#define FW_RI_RES_WR_HOSTFCMODE_V(x)   ((x) << FW_RI_RES_WR_HOSTFCMODE_S)
+#define FW_RI_RES_WR_HOSTFCMODE_G(x)   \
+       (((x) >> FW_RI_RES_WR_HOSTFCMODE_S) & FW_RI_RES_WR_HOSTFCMODE_M)
+
+#define FW_RI_RES_WR_CPRIO_S   19
+#define FW_RI_RES_WR_CPRIO_M   0x1
+#define FW_RI_RES_WR_CPRIO_V(x)        ((x) << FW_RI_RES_WR_CPRIO_S)
+#define FW_RI_RES_WR_CPRIO_G(x)        \
+       (((x) >> FW_RI_RES_WR_CPRIO_S) & FW_RI_RES_WR_CPRIO_M)
+#define FW_RI_RES_WR_CPRIO_F   FW_RI_RES_WR_CPRIO_V(1U)
+
+#define FW_RI_RES_WR_ONCHIP_S          18
+#define FW_RI_RES_WR_ONCHIP_M          0x1
+#define FW_RI_RES_WR_ONCHIP_V(x)       ((x) << FW_RI_RES_WR_ONCHIP_S)
+#define FW_RI_RES_WR_ONCHIP_G(x)       \
+       (((x) >> FW_RI_RES_WR_ONCHIP_S) & FW_RI_RES_WR_ONCHIP_M)
+#define FW_RI_RES_WR_ONCHIP_F  FW_RI_RES_WR_ONCHIP_V(1U)
+
+#define FW_RI_RES_WR_PCIECHN_S         16
+#define FW_RI_RES_WR_PCIECHN_M         0x3
+#define FW_RI_RES_WR_PCIECHN_V(x)      ((x) << FW_RI_RES_WR_PCIECHN_S)
+#define FW_RI_RES_WR_PCIECHN_G(x)      \
+       (((x) >> FW_RI_RES_WR_PCIECHN_S) & FW_RI_RES_WR_PCIECHN_M)
+
+#define FW_RI_RES_WR_IQID_S    0
+#define FW_RI_RES_WR_IQID_M    0xffff
+#define FW_RI_RES_WR_IQID_V(x) ((x) << FW_RI_RES_WR_IQID_S)
+#define FW_RI_RES_WR_IQID_G(x) \
+       (((x) >> FW_RI_RES_WR_IQID_S) & FW_RI_RES_WR_IQID_M)
+
+#define FW_RI_RES_WR_DCAEN_S   31
+#define FW_RI_RES_WR_DCAEN_M   0x1
+#define FW_RI_RES_WR_DCAEN_V(x)        ((x) << FW_RI_RES_WR_DCAEN_S)
+#define FW_RI_RES_WR_DCAEN_G(x)        \
+       (((x) >> FW_RI_RES_WR_DCAEN_S) & FW_RI_RES_WR_DCAEN_M)
+#define FW_RI_RES_WR_DCAEN_F   FW_RI_RES_WR_DCAEN_V(1U)
+
+#define FW_RI_RES_WR_DCACPU_S          26
+#define FW_RI_RES_WR_DCACPU_M          0x1f
+#define FW_RI_RES_WR_DCACPU_V(x)       ((x) << FW_RI_RES_WR_DCACPU_S)
+#define FW_RI_RES_WR_DCACPU_G(x)       \
+       (((x) >> FW_RI_RES_WR_DCACPU_S) & FW_RI_RES_WR_DCACPU_M)
+
+#define FW_RI_RES_WR_FBMIN_S   23
+#define FW_RI_RES_WR_FBMIN_M   0x7
+#define FW_RI_RES_WR_FBMIN_V(x)        ((x) << FW_RI_RES_WR_FBMIN_S)
+#define FW_RI_RES_WR_FBMIN_G(x)        \
+       (((x) >> FW_RI_RES_WR_FBMIN_S) & FW_RI_RES_WR_FBMIN_M)
+
+#define FW_RI_RES_WR_FBMAX_S   20
+#define FW_RI_RES_WR_FBMAX_M   0x7
+#define FW_RI_RES_WR_FBMAX_V(x)        ((x) << FW_RI_RES_WR_FBMAX_S)
+#define FW_RI_RES_WR_FBMAX_G(x)        \
+       (((x) >> FW_RI_RES_WR_FBMAX_S) & FW_RI_RES_WR_FBMAX_M)
+
+#define FW_RI_RES_WR_CIDXFTHRESHO_S    19
+#define FW_RI_RES_WR_CIDXFTHRESHO_M    0x1
+#define FW_RI_RES_WR_CIDXFTHRESHO_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESHO_S)
+#define FW_RI_RES_WR_CIDXFTHRESHO_G(x) \
+       (((x) >> FW_RI_RES_WR_CIDXFTHRESHO_S) & FW_RI_RES_WR_CIDXFTHRESHO_M)
+#define FW_RI_RES_WR_CIDXFTHRESHO_F    FW_RI_RES_WR_CIDXFTHRESHO_V(1U)
+
+#define FW_RI_RES_WR_CIDXFTHRESH_S     16
+#define FW_RI_RES_WR_CIDXFTHRESH_M     0x7
+#define FW_RI_RES_WR_CIDXFTHRESH_V(x)  ((x) << FW_RI_RES_WR_CIDXFTHRESH_S)
+#define FW_RI_RES_WR_CIDXFTHRESH_G(x)  \
+       (((x) >> FW_RI_RES_WR_CIDXFTHRESH_S) & FW_RI_RES_WR_CIDXFTHRESH_M)
+
+#define FW_RI_RES_WR_EQSIZE_S          0
+#define FW_RI_RES_WR_EQSIZE_M          0xffff
+#define FW_RI_RES_WR_EQSIZE_V(x)       ((x) << FW_RI_RES_WR_EQSIZE_S)
+#define FW_RI_RES_WR_EQSIZE_G(x)       \
+       (((x) >> FW_RI_RES_WR_EQSIZE_S) & FW_RI_RES_WR_EQSIZE_M)
+
+#define FW_RI_RES_WR_IQANDST_S         15
+#define FW_RI_RES_WR_IQANDST_M         0x1
+#define FW_RI_RES_WR_IQANDST_V(x)      ((x) << FW_RI_RES_WR_IQANDST_S)
+#define FW_RI_RES_WR_IQANDST_G(x)      \
+       (((x) >> FW_RI_RES_WR_IQANDST_S) & FW_RI_RES_WR_IQANDST_M)
+#define FW_RI_RES_WR_IQANDST_F FW_RI_RES_WR_IQANDST_V(1U)
+
+#define FW_RI_RES_WR_IQANUS_S          14
+#define FW_RI_RES_WR_IQANUS_M          0x1
+#define FW_RI_RES_WR_IQANUS_V(x)       ((x) << FW_RI_RES_WR_IQANUS_S)
+#define FW_RI_RES_WR_IQANUS_G(x)       \
+       (((x) >> FW_RI_RES_WR_IQANUS_S) & FW_RI_RES_WR_IQANUS_M)
+#define FW_RI_RES_WR_IQANUS_F  FW_RI_RES_WR_IQANUS_V(1U)
+
+#define FW_RI_RES_WR_IQANUD_S          12
+#define FW_RI_RES_WR_IQANUD_M          0x3
+#define FW_RI_RES_WR_IQANUD_V(x)       ((x) << FW_RI_RES_WR_IQANUD_S)
+#define FW_RI_RES_WR_IQANUD_G(x)       \
+       (((x) >> FW_RI_RES_WR_IQANUD_S) & FW_RI_RES_WR_IQANUD_M)
+
+#define FW_RI_RES_WR_IQANDSTINDEX_S    0
+#define FW_RI_RES_WR_IQANDSTINDEX_M    0xfff
+#define FW_RI_RES_WR_IQANDSTINDEX_V(x) ((x) << FW_RI_RES_WR_IQANDSTINDEX_S)
+#define FW_RI_RES_WR_IQANDSTINDEX_G(x) \
+       (((x) >> FW_RI_RES_WR_IQANDSTINDEX_S) & FW_RI_RES_WR_IQANDSTINDEX_M)
+
+#define FW_RI_RES_WR_IQDROPRSS_S       15
+#define FW_RI_RES_WR_IQDROPRSS_M       0x1
+#define FW_RI_RES_WR_IQDROPRSS_V(x)    ((x) << FW_RI_RES_WR_IQDROPRSS_S)
+#define FW_RI_RES_WR_IQDROPRSS_G(x)    \
+       (((x) >> FW_RI_RES_WR_IQDROPRSS_S) & FW_RI_RES_WR_IQDROPRSS_M)
+#define FW_RI_RES_WR_IQDROPRSS_F       FW_RI_RES_WR_IQDROPRSS_V(1U)
+
+#define FW_RI_RES_WR_IQGTSMODE_S       14
+#define FW_RI_RES_WR_IQGTSMODE_M       0x1
+#define FW_RI_RES_WR_IQGTSMODE_V(x)    ((x) << FW_RI_RES_WR_IQGTSMODE_S)
+#define FW_RI_RES_WR_IQGTSMODE_G(x)    \
+       (((x) >> FW_RI_RES_WR_IQGTSMODE_S) & FW_RI_RES_WR_IQGTSMODE_M)
+#define FW_RI_RES_WR_IQGTSMODE_F       FW_RI_RES_WR_IQGTSMODE_V(1U)
+
+#define FW_RI_RES_WR_IQPCIECH_S                12
+#define FW_RI_RES_WR_IQPCIECH_M                0x3
+#define FW_RI_RES_WR_IQPCIECH_V(x)     ((x) << FW_RI_RES_WR_IQPCIECH_S)
+#define FW_RI_RES_WR_IQPCIECH_G(x)     \
+       (((x) >> FW_RI_RES_WR_IQPCIECH_S) & FW_RI_RES_WR_IQPCIECH_M)
+
+#define FW_RI_RES_WR_IQDCAEN_S         11
+#define FW_RI_RES_WR_IQDCAEN_M         0x1
+#define FW_RI_RES_WR_IQDCAEN_V(x)      ((x) << FW_RI_RES_WR_IQDCAEN_S)
+#define FW_RI_RES_WR_IQDCAEN_G(x)      \
+       (((x) >> FW_RI_RES_WR_IQDCAEN_S) & FW_RI_RES_WR_IQDCAEN_M)
+#define FW_RI_RES_WR_IQDCAEN_F FW_RI_RES_WR_IQDCAEN_V(1U)
+
+#define FW_RI_RES_WR_IQDCACPU_S                6
+#define FW_RI_RES_WR_IQDCACPU_M                0x1f
+#define FW_RI_RES_WR_IQDCACPU_V(x)     ((x) << FW_RI_RES_WR_IQDCACPU_S)
+#define FW_RI_RES_WR_IQDCACPU_G(x)     \
+       (((x) >> FW_RI_RES_WR_IQDCACPU_S) & FW_RI_RES_WR_IQDCACPU_M)
+
+#define FW_RI_RES_WR_IQINTCNTTHRESH_S          4
+#define FW_RI_RES_WR_IQINTCNTTHRESH_M          0x3
+#define FW_RI_RES_WR_IQINTCNTTHRESH_V(x)       \
+       ((x) << FW_RI_RES_WR_IQINTCNTTHRESH_S)
+#define FW_RI_RES_WR_IQINTCNTTHRESH_G(x)       \
+       (((x) >> FW_RI_RES_WR_IQINTCNTTHRESH_S) & FW_RI_RES_WR_IQINTCNTTHRESH_M)
+
+#define FW_RI_RES_WR_IQO_S     3
+#define FW_RI_RES_WR_IQO_M     0x1
+#define FW_RI_RES_WR_IQO_V(x)  ((x) << FW_RI_RES_WR_IQO_S)
+#define FW_RI_RES_WR_IQO_G(x)  \
+       (((x) >> FW_RI_RES_WR_IQO_S) & FW_RI_RES_WR_IQO_M)
+#define FW_RI_RES_WR_IQO_F     FW_RI_RES_WR_IQO_V(1U)
+
+#define FW_RI_RES_WR_IQCPRIO_S         2
+#define FW_RI_RES_WR_IQCPRIO_M         0x1
+#define FW_RI_RES_WR_IQCPRIO_V(x)      ((x) << FW_RI_RES_WR_IQCPRIO_S)
+#define FW_RI_RES_WR_IQCPRIO_G(x)      \
+       (((x) >> FW_RI_RES_WR_IQCPRIO_S) & FW_RI_RES_WR_IQCPRIO_M)
+#define FW_RI_RES_WR_IQCPRIO_F FW_RI_RES_WR_IQCPRIO_V(1U)
+
+#define FW_RI_RES_WR_IQESIZE_S         0
+#define FW_RI_RES_WR_IQESIZE_M         0x3
+#define FW_RI_RES_WR_IQESIZE_V(x)      ((x) << FW_RI_RES_WR_IQESIZE_S)
+#define FW_RI_RES_WR_IQESIZE_G(x)      \
+       (((x) >> FW_RI_RES_WR_IQESIZE_S) & FW_RI_RES_WR_IQESIZE_M)
+
+#define FW_RI_RES_WR_IQNS_S    31
+#define FW_RI_RES_WR_IQNS_M    0x1
+#define FW_RI_RES_WR_IQNS_V(x) ((x) << FW_RI_RES_WR_IQNS_S)
+#define FW_RI_RES_WR_IQNS_G(x) \
+       (((x) >> FW_RI_RES_WR_IQNS_S) & FW_RI_RES_WR_IQNS_M)
+#define FW_RI_RES_WR_IQNS_F    FW_RI_RES_WR_IQNS_V(1U)
+
+#define FW_RI_RES_WR_IQRO_S    30
+#define FW_RI_RES_WR_IQRO_M    0x1
+#define FW_RI_RES_WR_IQRO_V(x) ((x) << FW_RI_RES_WR_IQRO_S)
+#define FW_RI_RES_WR_IQRO_G(x) \
+       (((x) >> FW_RI_RES_WR_IQRO_S) & FW_RI_RES_WR_IQRO_M)
+#define FW_RI_RES_WR_IQRO_F    FW_RI_RES_WR_IQRO_V(1U)
 
 struct fw_ri_rdma_write_wr {
        __u8   opcode;
@@ -562,11 +562,11 @@ struct fw_ri_send_wr {
 #endif
 };
 
-#define S_FW_RI_SEND_WR_SENDOP         0
-#define M_FW_RI_SEND_WR_SENDOP         0xf
-#define V_FW_RI_SEND_WR_SENDOP(x)      ((x) << S_FW_RI_SEND_WR_SENDOP)
-#define G_FW_RI_SEND_WR_SENDOP(x)      \
-    (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
+#define FW_RI_SEND_WR_SENDOP_S         0
+#define FW_RI_SEND_WR_SENDOP_M         0xf
+#define FW_RI_SEND_WR_SENDOP_V(x)      ((x) << FW_RI_SEND_WR_SENDOP_S)
+#define FW_RI_SEND_WR_SENDOP_G(x)      \
+       (((x) >> FW_RI_SEND_WR_SENDOP_S) & FW_RI_SEND_WR_SENDOP_M)
 
 struct fw_ri_rdma_read_wr {
        __u8   opcode;
@@ -612,25 +612,25 @@ struct fw_ri_bind_mw_wr {
        __be64 r4;
 };
 
-#define S_FW_RI_BIND_MW_WR_QPBINDE     6
-#define M_FW_RI_BIND_MW_WR_QPBINDE     0x1
-#define V_FW_RI_BIND_MW_WR_QPBINDE(x)  ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
-#define G_FW_RI_BIND_MW_WR_QPBINDE(x)  \
-    (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
-#define F_FW_RI_BIND_MW_WR_QPBINDE     V_FW_RI_BIND_MW_WR_QPBINDE(1U)
+#define FW_RI_BIND_MW_WR_QPBINDE_S     6
+#define FW_RI_BIND_MW_WR_QPBINDE_M     0x1
+#define FW_RI_BIND_MW_WR_QPBINDE_V(x)  ((x) << FW_RI_BIND_MW_WR_QPBINDE_S)
+#define FW_RI_BIND_MW_WR_QPBINDE_G(x)  \
+       (((x) >> FW_RI_BIND_MW_WR_QPBINDE_S) & FW_RI_BIND_MW_WR_QPBINDE_M)
+#define FW_RI_BIND_MW_WR_QPBINDE_F     FW_RI_BIND_MW_WR_QPBINDE_V(1U)
 
-#define S_FW_RI_BIND_MW_WR_NS          5
-#define M_FW_RI_BIND_MW_WR_NS          0x1
-#define V_FW_RI_BIND_MW_WR_NS(x)       ((x) << S_FW_RI_BIND_MW_WR_NS)
-#define G_FW_RI_BIND_MW_WR_NS(x)       \
-    (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
-#define F_FW_RI_BIND_MW_WR_NS  V_FW_RI_BIND_MW_WR_NS(1U)
+#define FW_RI_BIND_MW_WR_NS_S          5
+#define FW_RI_BIND_MW_WR_NS_M          0x1
+#define FW_RI_BIND_MW_WR_NS_V(x)       ((x) << FW_RI_BIND_MW_WR_NS_S)
+#define FW_RI_BIND_MW_WR_NS_G(x)       \
+       (((x) >> FW_RI_BIND_MW_WR_NS_S) & FW_RI_BIND_MW_WR_NS_M)
+#define FW_RI_BIND_MW_WR_NS_F  FW_RI_BIND_MW_WR_NS_V(1U)
 
-#define S_FW_RI_BIND_MW_WR_DCACPU      0
-#define M_FW_RI_BIND_MW_WR_DCACPU      0x1f
-#define V_FW_RI_BIND_MW_WR_DCACPU(x)   ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
-#define G_FW_RI_BIND_MW_WR_DCACPU(x)   \
-    (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
+#define FW_RI_BIND_MW_WR_DCACPU_S      0
+#define FW_RI_BIND_MW_WR_DCACPU_M      0x1f
+#define FW_RI_BIND_MW_WR_DCACPU_V(x)   ((x) << FW_RI_BIND_MW_WR_DCACPU_S)
+#define FW_RI_BIND_MW_WR_DCACPU_G(x)   \
+       (((x) >> FW_RI_BIND_MW_WR_DCACPU_S) & FW_RI_BIND_MW_WR_DCACPU_M)
 
 struct fw_ri_fr_nsmr_wr {
        __u8   opcode;
@@ -649,25 +649,25 @@ struct fw_ri_fr_nsmr_wr {
        __be32 va_lo_fbo;
 };
 
-#define S_FW_RI_FR_NSMR_WR_QPBINDE     6
-#define M_FW_RI_FR_NSMR_WR_QPBINDE     0x1
-#define V_FW_RI_FR_NSMR_WR_QPBINDE(x)  ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
-#define G_FW_RI_FR_NSMR_WR_QPBINDE(x)  \
-    (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
-#define F_FW_RI_FR_NSMR_WR_QPBINDE     V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
+#define FW_RI_FR_NSMR_WR_QPBINDE_S     6
+#define FW_RI_FR_NSMR_WR_QPBINDE_M     0x1
+#define FW_RI_FR_NSMR_WR_QPBINDE_V(x)  ((x) << FW_RI_FR_NSMR_WR_QPBINDE_S)
+#define FW_RI_FR_NSMR_WR_QPBINDE_G(x)  \
+       (((x) >> FW_RI_FR_NSMR_WR_QPBINDE_S) & FW_RI_FR_NSMR_WR_QPBINDE_M)
+#define FW_RI_FR_NSMR_WR_QPBINDE_F     FW_RI_FR_NSMR_WR_QPBINDE_V(1U)
 
-#define S_FW_RI_FR_NSMR_WR_NS          5
-#define M_FW_RI_FR_NSMR_WR_NS          0x1
-#define V_FW_RI_FR_NSMR_WR_NS(x)       ((x) << S_FW_RI_FR_NSMR_WR_NS)
-#define G_FW_RI_FR_NSMR_WR_NS(x)       \
-    (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
-#define F_FW_RI_FR_NSMR_WR_NS  V_FW_RI_FR_NSMR_WR_NS(1U)
+#define FW_RI_FR_NSMR_WR_NS_S          5
+#define FW_RI_FR_NSMR_WR_NS_M          0x1
+#define FW_RI_FR_NSMR_WR_NS_V(x)       ((x) << FW_RI_FR_NSMR_WR_NS_S)
+#define FW_RI_FR_NSMR_WR_NS_G(x)       \
+       (((x) >> FW_RI_FR_NSMR_WR_NS_S) & FW_RI_FR_NSMR_WR_NS_M)
+#define FW_RI_FR_NSMR_WR_NS_F  FW_RI_FR_NSMR_WR_NS_V(1U)
 
-#define S_FW_RI_FR_NSMR_WR_DCACPU      0
-#define M_FW_RI_FR_NSMR_WR_DCACPU      0x1f
-#define V_FW_RI_FR_NSMR_WR_DCACPU(x)   ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
-#define G_FW_RI_FR_NSMR_WR_DCACPU(x)   \
-    (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
+#define FW_RI_FR_NSMR_WR_DCACPU_S      0
+#define FW_RI_FR_NSMR_WR_DCACPU_M      0x1f
+#define FW_RI_FR_NSMR_WR_DCACPU_V(x)   ((x) << FW_RI_FR_NSMR_WR_DCACPU_S)
+#define FW_RI_FR_NSMR_WR_DCACPU_G(x)   \
+       (((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M)
 
 struct fw_ri_inv_lstag_wr {
        __u8   opcode;
@@ -740,18 +740,18 @@ struct fw_ri_wr {
        } u;
 };
 
-#define S_FW_RI_WR_MPAREQBIT   7
-#define M_FW_RI_WR_MPAREQBIT   0x1
-#define V_FW_RI_WR_MPAREQBIT(x)        ((x) << S_FW_RI_WR_MPAREQBIT)
-#define G_FW_RI_WR_MPAREQBIT(x)        \
-    (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
-#define F_FW_RI_WR_MPAREQBIT   V_FW_RI_WR_MPAREQBIT(1U)
+#define FW_RI_WR_MPAREQBIT_S   7
+#define FW_RI_WR_MPAREQBIT_M   0x1
+#define FW_RI_WR_MPAREQBIT_V(x)        ((x) << FW_RI_WR_MPAREQBIT_S)
+#define FW_RI_WR_MPAREQBIT_G(x)        \
+       (((x) >> FW_RI_WR_MPAREQBIT_S) & FW_RI_WR_MPAREQBIT_M)
+#define FW_RI_WR_MPAREQBIT_F   FW_RI_WR_MPAREQBIT_V(1U)
 
-#define S_FW_RI_WR_P2PTYPE     0
-#define M_FW_RI_WR_P2PTYPE     0xf
-#define V_FW_RI_WR_P2PTYPE(x)  ((x) << S_FW_RI_WR_P2PTYPE)
-#define G_FW_RI_WR_P2PTYPE(x)  \
-    (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+#define FW_RI_WR_P2PTYPE_S     0
+#define FW_RI_WR_P2PTYPE_M     0xf
+#define FW_RI_WR_P2PTYPE_V(x)  ((x) << FW_RI_WR_P2PTYPE_S)
+#define FW_RI_WR_P2PTYPE_G(x)  \
+       (((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
 
 struct tcp_options {
        __be16 mss;
@@ -783,58 +783,58 @@ struct cpl_pass_accept_req {
 };
 
 /* cpl_pass_accept_req.hdr_len fields */
-#define S_SYN_RX_CHAN    0
-#define M_SYN_RX_CHAN    0xF
-#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
-#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
-
-#define S_TCP_HDR_LEN    10
-#define M_TCP_HDR_LEN    0x3F
-#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
-#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
-
-#define S_IP_HDR_LEN    16
-#define M_IP_HDR_LEN    0x3FF
-#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
-#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
-
-#define S_ETH_HDR_LEN    26
-#define M_ETH_HDR_LEN    0x1F
-#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
-#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
+#define SYN_RX_CHAN_S    0
+#define SYN_RX_CHAN_M    0xF
+#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
+#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
+
+#define TCP_HDR_LEN_S    10
+#define TCP_HDR_LEN_M    0x3F
+#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
+#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
+
+#define IP_HDR_LEN_S    16
+#define IP_HDR_LEN_M    0x3FF
+#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
+#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
+
+#define ETH_HDR_LEN_S    26
+#define ETH_HDR_LEN_M    0x1F
+#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
+#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
 
 /* cpl_pass_accept_req.l2info fields */
-#define S_SYN_MAC_IDX    0
-#define M_SYN_MAC_IDX    0x1FF
-#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
-#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
+#define SYN_MAC_IDX_S    0
+#define SYN_MAC_IDX_M    0x1FF
+#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
+#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
 
-#define S_SYN_XACT_MATCH    9
-#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
-#define F_SYN_XACT_MATCH    V_SYN_XACT_MATCH(1U)
+#define SYN_XACT_MATCH_S    9
+#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
+#define SYN_XACT_MATCH_F    SYN_XACT_MATCH_V(1U)
 
-#define S_SYN_INTF    12
-#define M_SYN_INTF    0xF
-#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
-#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+#define SYN_INTF_S    12
+#define SYN_INTF_M    0xF
+#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
+#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
 
 struct ulptx_idata {
        __be32 cmd_more;
        __be32 len;
 };
 
-#define S_ULPTX_NSGE    0
-#define M_ULPTX_NSGE    0xFFFF
-#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+#define ULPTX_NSGE_S    0
+#define ULPTX_NSGE_M    0xFFFF
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
 
-#define S_RX_DACK_MODE    29
-#define M_RX_DACK_MODE    0x3
-#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
-#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+#define RX_DACK_MODE_S    29
+#define RX_DACK_MODE_M    0x3
+#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
+#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
 
-#define S_RX_DACK_CHANGE    31
-#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
-#define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
+#define RX_DACK_CHANGE_S    31
+#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
+#define RX_DACK_CHANGE_F    RX_DACK_CHANGE_V(1U)
 
 enum {                     /* TCP congestion control algorithms */
        CONG_ALG_RENO,
@@ -843,10 +843,10 @@ enum {                     /* TCP congestion control algorithms */
        CONG_ALG_HIGHSPEED
 };
 
-#define S_CONG_CNTRL    14
-#define M_CONG_CNTRL    0x3
-#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
-#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+#define CONG_CNTRL_S    14
+#define CONG_CNTRL_M    0x3
+#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
+#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
 
 #define CONG_CNTRL_VALID   (1 << 18)
 
index 2d8c3397774f6ba84c207bd35a64a203f2916620..f50a546224adf09b91ec7f4b1d75b2bbfde5c880 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/inet.h>
 #include <linux/string.h>
+#include <linux/mlx4/driver.h>
 
 #include "mlx4_ib.h"
 
index 0eb141c41416cd89c99a3529df3e1dd0f2246234..a31e031afd87486ce0c876c541d4efc7cb981f8c 100644 (file)
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
                        continue;
 
                slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
-               if (slave_id >= dev->dev->num_vfs + 1)
+               if (slave_id >= dev->dev->persist->num_vfs + 1)
                        return;
                tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
                form_cache_ag = get_cached_alias_guid(dev, port_num,
index 82a7dd87089b66efa0e925116b4d72053c341a3f..c7619716c31dd92cb5ddad4f77d98528392698b5 100644 (file)
@@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
        ctx->ib_dev = &dev->ib_dev;
 
        for (i = 0;
-            i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
+            i < min(dev->dev->caps.sqp_demux,
+            (u16)(dev->dev->persist->num_vfs + 1));
             i++) {
                struct mlx4_active_ports actv_ports =
                        mlx4_get_active_ports(dev->dev, i);
index 57ecc5b204f3f6fdb01f1c2bebcdb6c752ad2ece..2ed5b996b2f43611584c1d7328bd8b839a0e8f9b 100644 (file)
@@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
-       props->vendor_part_id      = dev->dev->pdev->device;
+       props->vendor_part_id      = dev->dev->persist->pdev->device;
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
 
@@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        enum ib_mtu tmp;
        struct mlx4_cmd_mailbox *mailbox;
        int err = 0;
+       int is_bonded = mlx4_is_bonded(mdev->dev);
 
        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
@@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        props->state            = IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
        props->active_mtu       = IB_MTU_256;
+       if (is_bonded)
+               rtnl_lock(); /* required to get upper dev */
        spin_lock_bh(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
+       if (ndev && is_bonded)
+               ndev = netdev_master_upper_dev_get(ndev);
        if (!ndev)
                goto out_unlock;
 
@@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        props->phys_state       = state_to_phys_state(props->state);
 out_unlock:
        spin_unlock_bh(&iboe->lock);
+       if (is_bonded)
+               rtnl_unlock();
 out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return err;
@@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 
 struct mlx4_ib_steering {
        struct list_head list;
-       u64 reg_id;
+       struct mlx4_flow_reg_id reg_id;
        union ib_gid gid;
 };
 
@@ -1114,7 +1121,8 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
        struct mlx4_dev *dev = to_mdev(qp->device)->dev;
        int err = 0;
 
-       if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+       if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+           dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
                return 0; /* do nothing */
 
        ib_flow = flow_attr + 1;
@@ -1134,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                                    struct ib_flow_attr *flow_attr,
                                    int domain)
 {
-       int err = 0, i = 0;
+       int err = 0, i = 0, j = 0;
        struct mlx4_ib_flow *mflow;
        enum mlx4_net_trans_promisc_mode type[2];
+       struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+       int is_bonded = mlx4_is_bonded(dev);
 
        memset(type, 0, sizeof(type));
 
@@ -1171,26 +1181,55 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 
        while (i < ARRAY_SIZE(type) && type[i]) {
                err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
-                                           &mflow->reg_id[i]);
+                                           &mflow->reg_id[i].id);
                if (err)
                        goto err_create_flow;
                i++;
+               if (is_bonded) {
+                       flow_attr->port = 2;
+                       err = __mlx4_ib_create_flow(qp, flow_attr,
+                                                   domain, type[j],
+                                                   &mflow->reg_id[j].mirror);
+                       flow_attr->port = 1;
+                       if (err)
+                               goto err_create_flow;
+                       j++;
+               }
+
        }
 
        if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
-               err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+               err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+                                              &mflow->reg_id[i].id);
                if (err)
                        goto err_create_flow;
                i++;
+               if (is_bonded) {
+                       flow_attr->port = 2;
+                       err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+                                                      &mflow->reg_id[j].mirror);
+                       flow_attr->port = 1;
+                       if (err)
+                               goto err_create_flow;
+                       j++;
+               }
+               /* function to create mirror rule */
        }
 
        return &mflow->ibflow;
 
 err_create_flow:
        while (i) {
-               (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+               (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+                                            mflow->reg_id[i].id);
                i--;
        }
+
+       while (j) {
+               (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+                                            mflow->reg_id[j].mirror);
+               j--;
+       }
 err_free:
        kfree(mflow);
        return ERR_PTR(err);
@@ -1203,10 +1242,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
        struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
        struct mlx4_ib_flow *mflow = to_mflow(flow_id);
 
-       while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
-               err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+       while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
+               err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
                if (err)
                        ret = err;
+               if (mflow->reg_id[i].mirror) {
+                       err = __mlx4_ib_destroy_flow(mdev->dev,
+                                                    mflow->reg_id[i].mirror);
+                       if (err)
+                               ret = err;
+               }
                i++;
        }
 
@@ -1218,11 +1263,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+       struct mlx4_dev *dev = mdev->dev;
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-       u64 reg_id;
        struct mlx4_ib_steering *ib_steering = NULL;
        enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
                MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       struct mlx4_flow_reg_id reg_id;
 
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1234,10 +1280,20 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
                                    !!(mqp->flags &
                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-                                   prot, &reg_id);
+                                   prot, &reg_id.id);
        if (err)
                goto err_malloc;
 
+       reg_id.mirror = 0;
+       if (mlx4_is_bonded(dev)) {
+               err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 2,
+                                           !!(mqp->flags &
+                                           MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+                                           prot, &reg_id.mirror);
+               if (err)
+                       goto err_add;
+       }
+
        err = add_gid_entry(ibqp, gid);
        if (err)
                goto err_add;
@@ -1253,7 +1309,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 err_add:
        mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-                             prot, reg_id);
+                             prot, reg_id.id);
+       if (reg_id.mirror)
+               mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+                                     prot, reg_id.mirror);
 err_malloc:
        kfree(ib_steering);
 
@@ -1280,10 +1339,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+       struct mlx4_dev *dev = mdev->dev;
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;
-       u64 reg_id = 0;
+       struct mlx4_flow_reg_id reg_id = {0, 0};
+
        enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
                MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
@@ -1308,10 +1369,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        }
 
        err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-                                   prot, reg_id);
+                                   prot, reg_id.id);
        if (err)
                return err;
 
+       if (mlx4_is_bonded(dev)) {
+               err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+                                           prot, reg_id.mirror);
+               if (err)
+                       return err;
+       }
+
        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
@@ -1375,7 +1443,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
 {
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
-       return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
+       return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
 }
 
 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -1439,6 +1507,7 @@ static void update_gids_task(struct work_struct *work)
        union ib_gid *gids;
        int err;
        struct mlx4_dev *dev = gw->dev->dev;
+       int is_bonded = mlx4_is_bonded(dev);
 
        if (!gw->dev->ib_active)
                return;
@@ -1458,7 +1527,10 @@ static void update_gids_task(struct work_struct *work)
        if (err)
                pr_warn("set port command failed\n");
        else
-               mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+               if ((gw->port == 1) || !is_bonded)
+                       mlx4_ib_dispatch_event(gw->dev,
+                                              is_bonded ? 1 : gw->port,
+                                              IB_EVENT_GID_CHANGE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        kfree(gw);
@@ -1874,7 +1946,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                                 * don't want the bond IP based gids in the table since
                                 * flows that select port by gid may get the down port.
                                */
-                               if (port_state == IB_PORT_DOWN) {
+                               if (port_state == IB_PORT_DOWN &&
+                                   !mlx4_is_bonded(ibdev->dev)) {
                                        reset_gid_table(ibdev, port);
                                        mlx4_ib_set_default_gid(ibdev,
                                                                curr_netdev,
@@ -1937,7 +2010,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
        int i;
 
        if (mlx4_is_master(ibdev->dev)) {
-               for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
+               for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
+                    ++slave) {
                        for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
                                for (i = 0;
                                     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
@@ -1994,7 +2068,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
                for (j = 0; j < eq_per_port; j++) {
                        snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
-                                i, j, dev->pdev->bus->name);
+                                i, j, dev->persist->pdev->bus->name);
                        /* Set IRQ for specific name (per ring) */
                        if (mlx4_assign_eq(dev, name, NULL,
                                           &ibdev->eq_table[eq])) {
@@ -2045,6 +2119,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        int err;
        struct mlx4_ib_iboe *iboe;
        int ib_num_ports = 0;
+       int num_req_counters;
 
        pr_info_once("%s", mlx4_ib_version);
 
@@ -2058,7 +2133,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
        ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
        if (!ibdev) {
-               dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
+               dev_err(&dev->persist->pdev->dev,
+                       "Device struct alloc failed\n");
                return NULL;
        }
 
@@ -2077,15 +2153,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
        ibdev->dev = dev;
+       ibdev->bond_next_port   = 0;
 
        strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
        ibdev->ib_dev.owner             = THIS_MODULE;
        ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
        ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
        ibdev->num_ports                = num_ports;
-       ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
+       ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
+                                               1 : ibdev->num_ports;
        ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
-       ibdev->ib_dev.dma_device        = &dev->pdev->dev;
+       ibdev->ib_dev.dma_device        = &dev->persist->pdev->dev;
 
        if (dev->caps.userspace_caps)
                ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
@@ -2204,7 +2282,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        if (init_node_data(ibdev))
                goto err_map;
 
-       for (i = 0; i < ibdev->num_ports; ++i) {
+       num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
+       for (i = 0; i < num_req_counters; ++i) {
                mutex_init(&ibdev->qp1_proxy_lock[i]);
                if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
                                                IB_LINK_LAYER_ETHERNET) {
@@ -2215,6 +2294,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        ibdev->counters[i] = -1;
                }
        }
+       if (mlx4_is_bonded(dev))
+               for (i = 1; i < ibdev->num_ports ; ++i)
+                       ibdev->counters[i] = ibdev->counters[0];
+
 
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                ib_num_ports++;
@@ -2236,7 +2319,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                                sizeof(long),
                                GFP_KERNEL);
                if (!ibdev->ib_uc_qpns_bitmap) {
-                       dev_err(&dev->pdev->dev, "bit map alloc failed\n");
+                       dev_err(&dev->persist->pdev->dev,
+                               "bit map alloc failed\n");
                        goto err_steer_qp_release;
                }
 
@@ -2534,6 +2618,38 @@ out:
        return;
 }
 
+static void handle_bonded_port_state_event(struct work_struct *work)
+{
+       struct ib_event_work *ew =
+               container_of(work, struct ib_event_work, work);
+       struct mlx4_ib_dev *ibdev = ew->ib_dev;
+       enum ib_port_state bonded_port_state = IB_PORT_NOP;
+       int i;
+       struct ib_event ibev;
+
+       kfree(ew);
+       spin_lock_bh(&ibdev->iboe.lock);
+       for (i = 0; i < MLX4_MAX_PORTS; ++i) {
+               struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+
+               enum ib_port_state curr_port_state =
+                       (netif_running(curr_netdev) &&
+                        netif_carrier_ok(curr_netdev)) ?
+                       IB_PORT_ACTIVE : IB_PORT_DOWN;
+
+               bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
+                       curr_port_state : IB_PORT_ACTIVE;
+       }
+       spin_unlock_bh(&ibdev->iboe.lock);
+
+       ibev.device = &ibdev->ib_dev;
+       ibev.element.port_num = 1;
+       ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
+               IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+       ib_dispatch_event(&ibev);
+}
+
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
                          enum mlx4_dev_event event, unsigned long param)
 {
@@ -2543,6 +2659,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
        struct ib_event_work *ew;
        int p = 0;
 
+       if (mlx4_is_bonded(dev) &&
+           ((event == MLX4_DEV_EVENT_PORT_UP) ||
+           (event == MLX4_DEV_EVENT_PORT_DOWN))) {
+               ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+               if (!ew)
+                       return;
+               INIT_WORK(&ew->work, handle_bonded_port_state_event);
+               ew->ib_dev = ibdev;
+               queue_work(wq, &ew->work);
+               return;
+       }
+
        if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
                eqe = (struct mlx4_eqe *)param;
        else
@@ -2603,7 +2731,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
        }
 
        ibev.device           = ibdev_ptr;
-       ibev.element.port_num = (u8) p;
+       ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
 
        ib_dispatch_event(&ibev);
 }
@@ -2612,7 +2740,8 @@ static struct mlx4_interface mlx4_ib_interface = {
        .add            = mlx4_ib_add,
        .remove         = mlx4_ib_remove,
        .event          = mlx4_ib_event,
-       .protocol       = MLX4_PROT_IB_IPV6
+       .protocol       = MLX4_PROT_IB_IPV6,
+       .flags          = MLX4_INTFF_BONDING
 };
 
 static int __init mlx4_ib_init(void)
index 6eb743f65f6f5633eb126ae5fe7ac768299fe77d..721540c9163d540b98717fc8f4c13cb2302d5752 100644 (file)
@@ -134,10 +134,17 @@ struct mlx4_ib_fmr {
        struct mlx4_fmr         mfmr;
 };
 
+#define MAX_REGS_PER_FLOW 2
+
+struct mlx4_flow_reg_id {
+       u64 id;
+       u64 mirror;
+};
+
 struct mlx4_ib_flow {
        struct ib_flow ibflow;
        /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
-       u64 reg_id[2];
+       struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
 };
 
 struct mlx4_ib_wq {
@@ -527,6 +534,7 @@ struct mlx4_ib_dev {
        struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
        /* lock when destroying qp1_proxy and getting netdev events */
        struct mutex            qp1_proxy_lock[MLX4_MAX_PORTS];
+       u8                      bond_next_port;
 };
 
 struct ib_event_work {
@@ -622,6 +630,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
        return container_of(ibah, struct mlx4_ib_ah, ibah);
 }
 
+static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
+{
+       dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
+
+       return dev->bond_next_port + 1;
+}
+
 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
 
index c36ccbd9a644f16e85cdcb4a323cbf1803683292..e0d271782d0a0012577a100e4a59c27f17f9366d 100644 (file)
@@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
        if (!mfrpl->ibfrpl.page_list)
                goto err_free;
 
-       mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
+       mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
+                                                    pdev->dev,
                                                     size, &mfrpl->map,
                                                     GFP_KERNEL);
        if (!mfrpl->mapped_page_list)
@@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
        struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
        int size = page_list->max_page_list_len * sizeof (u64);
 
-       dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
+       dma_free_coherent(&dev->dev->persist->pdev->dev, size,
+                         mfrpl->mapped_page_list,
                          mfrpl->map);
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
index cf000b7ad64f9b11698ee8dbfa2aec9883e886db..792f9dc86adac3a098f167c69f29455945eb315b 100644 (file)
@@ -40,6 +40,7 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_mad.h>
 
+#include <linux/mlx4/driver.h>
 #include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
@@ -93,17 +94,6 @@ enum {
 #ifndef ETH_ALEN
 #define ETH_ALEN        6
 #endif
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
-       u64 mac = 0;
-       int i;
-
-       for (i = 0; i < ETH_ALEN; i++) {
-               mac <<= 8;
-               mac |= addr[i];
-       }
-       return mac;
-}
 
 static const __be32 mlx4_ib_opcode[] = {
        [IB_WR_SEND]                            = cpu_to_be32(MLX4_OPCODE_SEND),
@@ -1915,6 +1905,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto out;
        }
 
+       if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
+               if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+                       if ((ibqp->qp_type == IB_QPT_RC) ||
+                           (ibqp->qp_type == IB_QPT_UD) ||
+                           (ibqp->qp_type == IB_QPT_UC) ||
+                           (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+                           (ibqp->qp_type == IB_QPT_XRC_INI)) {
+                               attr->port_num = mlx4_ib_bond_next_port(dev);
+                       }
+               } else {
+                       /* no sense in changing port_num
+                        * when ports are bonded */
+                       attr_mask &= ~IB_QP_PORT;
+               }
+       }
+
        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
                pr_debug("qpn 0x%x: invalid port number (%d) specified "
@@ -1965,6 +1971,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
        err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
+       if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
+               attr->port_num = 1;
+
 out:
        mutex_unlock(&qp->mutex);
        return err;
index cb4c66e723b59d915b8e7f83221dcd437d180a8e..d10c2b8a5dadbe7eef1774c2eb29471ac317cb27 100644 (file)
@@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
        char base_name[9];
 
        /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
-       strlcpy(name, pci_name(dev->dev->pdev), max);
+       strlcpy(name, pci_name(dev->dev->persist->pdev), max);
        strncpy(base_name, name, 8); /*till xxxx:yy:*/
        base_name[8] = '\0';
        /* with no ARI only 3 last bits are used so when the fn is higher than 8
@@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device)
        if (!mlx4_is_master(device->dev))
                return 0;
 
-       for (i = 0; i <= device->dev->num_vfs; ++i)
+       for (i = 0; i <= device->dev->persist->num_vfs; ++i)
                register_one_pkey_tree(device, i);
 
        return 0;
@@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
        if (!mlx4_is_master(device->dev))
                return;
 
-       for (slave = device->dev->num_vfs; slave >= 0; --slave) {
+       for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
                list_for_each_entry_safe(p, t,
                                         &device->pkeys.pkey_port_list[slave],
                                         entry) {
index b56e4c5593ee92f3fe69d542c77919ee93e3cfc7..611a9fdf2f383cf0982415d4603906e208f24da8 100644 (file)
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
                for (k = 0; k < len; k++) {
                        if (!(i & mask)) {
                                tmp = (unsigned long)pfn;
-                               m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+                               m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
                                skip = 1 << m;
                                mask = skip - 1;
                                base = pfn;
index 49eb5111d2cd0b96c2f8ce8e4260e1fbfd5497cd..70acda91eb2a934e79e999a1a3297d92a2148ae2 100644 (file)
@@ -373,11 +373,11 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
        wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
 
        /* setup the VLAN tag if present */
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
-                               netdev->name, vlan_tx_tag_get(skb));
+                               netdev->name, skb_vlan_tag_get(skb));
                wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
-               wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+               wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
        } else
                wqe_misc = 0;
 
@@ -576,11 +576,12 @@ tso_sq_no_longer_full:
                                wqe_fragment_length =
                                                (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
                                /* setup the VLAN tag if present */
-                               if (vlan_tx_tag_present(skb)) {
+                               if (skb_vlan_tag_present(skb)) {
                                        nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
-                                                       netdev->name, vlan_tx_tag_get(skb) );
+                                                       netdev->name,
+                                                 skb_vlan_tag_get(skb));
                                        wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
-                                       wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+                                       wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
                                } else
                                        wqe_misc = 0;
 
index 8ba80a6d3a46d17daf233945e76d8760c107a752..d7562beb542367faf1b93d7ba66e8ef879c73bf4 100644 (file)
@@ -98,15 +98,9 @@ enum {
 
        IPOIB_MCAST_FLAG_FOUND    = 0,  /* used in set_multicast_list */
        IPOIB_MCAST_FLAG_SENDONLY = 1,
-       /*
-        * For IPOIB_MCAST_FLAG_BUSY
-        * When set, in flight join and mcast->mc is unreliable
-        * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or
-        *   haven't started yet
-        * When clear and mcast->mc is valid pointer, join was successful
-        */
-       IPOIB_MCAST_FLAG_BUSY     = 2,
+       IPOIB_MCAST_FLAG_BUSY     = 2,  /* joining or already joined */
        IPOIB_MCAST_FLAG_ATTACHED = 3,
+       IPOIB_MCAST_JOIN_STARTED  = 4,
 
        MAX_SEND_CQE              = 16,
        IPOIB_CM_COPYBREAK        = 256,
@@ -323,7 +317,6 @@ struct ipoib_dev_priv {
        struct list_head multicast_list;
        struct rb_root multicast_tree;
 
-       struct workqueue_struct *wq;
        struct delayed_work mcast_task;
        struct work_struct carrier_on_task;
        struct work_struct flush_light;
@@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
 void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
-int ipoib_ib_dev_open(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev, int flush);
 int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_down(struct net_device *dev, int flush);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
 void ipoib_pkey_dev_check_presence(struct net_device *dev);
 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
 
 void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
index 56959adb6c7da51ccbb6d20307247b7cb69ad55a..933efcea0d03f11b4da3967b8eedc137da21e08a 100644 (file)
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
        }
 
        spin_lock_irq(&priv->lock);
-       queue_delayed_work(priv->wq,
+       queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        /* Add this entry to passive ids list head, but do not re-add it
         * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                        spin_lock_irqsave(&priv->lock, flags);
                        list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                        ipoib_cm_start_rx_drain(priv);
-                       queue_work(priv->wq, &priv->cm.rx_reap_task);
+                       queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else
                        ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                                spin_lock_irqsave(&priv->lock, flags);
                                list_move(&p->list, &priv->cm.rx_reap_list);
                                spin_unlock_irqrestore(&priv->lock, flags);
-                               queue_work(priv->wq, &priv->cm.rx_reap_task);
+                               queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        }
                        return;
                }
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
-                       queue_work(priv->wq, &priv->cm.reap_task);
+                       queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }
 
                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 
                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
-                       queue_work(priv->wq, &priv->cm.reap_task);
+                       queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }
 
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
        tx->dev = dev;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
-       queue_work(priv->wq, &priv->cm.start_task);
+       queue_work(ipoib_workqueue, &priv->cm.start_task);
        return tx;
 }
 
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                spin_lock_irqsave(&priv->lock, flags);
                list_move(&tx->list, &priv->cm.reap_list);
-               queue_work(priv->wq, &priv->cm.reap_task);
+               queue_work(ipoib_workqueue, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid %pI6\n",
                          tx->neigh->daddr + 4);
                tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
 
        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
-               queue_work(priv->wq, &priv->cm.skb_task);
+               queue_work(ipoib_workqueue, &priv->cm.skb_task);
 }
 
 static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
        }
 
        if (!list_empty(&priv->cm.passive_ids))
-               queue_delayed_work(priv->wq,
+               queue_delayed_work(ipoib_workqueue,
                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        spin_unlock_irq(&priv->lock);
 }
index fe65abb5150c76b2eb941b3b2331930bc5b2b81e..72626c3481749b962fe96b79722d7c8e9c99c585 100644 (file)
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
        __ipoib_reap_ah(dev);
 
        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->ah_reap_task,
+               queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
 }
 
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
        drain_tx_cq((struct net_device *)ctx);
 }
 
-int ipoib_ib_dev_open(struct net_device *dev)
+int ipoib_ib_dev_open(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
        }
 
        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
-       queue_delayed_work(priv->wq, &priv->ah_reap_task,
+       queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));
 
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
 dev_stop:
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);
-       ipoib_ib_dev_stop(dev);
+       ipoib_ib_dev_stop(dev, flush);
        return -1;
 }
 
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
        return ipoib_mcast_start_thread(dev);
 }
 
-int ipoib_ib_dev_down(struct net_device *dev)
+int ipoib_ib_dev_down(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);
 
-       ipoib_mcast_stop_thread(dev);
+       ipoib_mcast_stop_thread(dev, flush);
        ipoib_mcast_dev_flush(dev);
 
        ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
        local_bh_enable();
 }
 
-int ipoib_ib_dev_stop(struct net_device *dev)
+int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
@@ -880,7 +880,8 @@ timeout:
        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
-       flush_workqueue(priv->wq);
+       if (flush)
+               flush_workqueue(ipoib_workqueue);
 
        begin = jiffies;
 
@@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
                    (unsigned long) dev);
 
        if (dev->flags & IFF_UP) {
-               if (ipoib_ib_dev_open(dev)) {
+               if (ipoib_ib_dev_open(dev, 1)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
@@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
        }
 
        if (level >= IPOIB_FLUSH_NORMAL)
-               ipoib_ib_dev_down(dev);
+               ipoib_ib_dev_down(dev, 0);
 
        if (level == IPOIB_FLUSH_HEAVY) {
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
-                       ipoib_ib_dev_stop(dev);
-               if (ipoib_ib_dev_open(dev) != 0)
+                       ipoib_ib_dev_stop(dev, 0);
+               if (ipoib_ib_dev_open(dev, 0) != 0)
                        return;
                if (netif_queue_stopped(dev))
                        netif_start_queue(dev);
@@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
         */
        ipoib_flush_paths(dev);
 
-       ipoib_mcast_stop_thread(dev);
+       ipoib_mcast_stop_thread(dev, 1);
        ipoib_mcast_dev_flush(dev);
 
        ipoib_transport_dev_cleanup(dev);
index 6bad17d4d5880886f88ef48d8424abe4347cdc50..58b5aa3b6f2dded5d2e6d15aff080551aa9eddd9 100644 (file)
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
 
        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
-       if (ipoib_ib_dev_open(dev)) {
+       if (ipoib_ib_dev_open(dev, 1)) {
                if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                        return 0;
                goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
        return 0;
 
 err_stop:
-       ipoib_ib_dev_stop(dev);
+       ipoib_ib_dev_stop(dev, 1);
 
 err_disable:
        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
 
        netif_stop_queue(dev);
 
-       ipoib_ib_dev_down(dev);
-       ipoib_ib_dev_stop(dev);
+       ipoib_ib_dev_down(dev, 1);
+       ipoib_ib_dev_stop(dev, 0);
 
        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
                return;
        }
 
-       queue_work(priv->wq, &priv->restart_task);
+       queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
        __ipoib_reap_neigh(priv);
 
        if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+               queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
                                   arp_tbl.gc_interval);
 }
 
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 
        /* start garbage collection */
        clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
-       queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+       queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
                           arp_tbl.gc_interval);
 
        return 0;
@@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+       if (ipoib_neigh_hash_init(priv) < 0)
+               goto out;
        /* Allocate RX/TX "rings" to hold queued skbs */
        priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, ipoib_recvq_size);
-               goto out;
+               goto out_neigh_hash_cleanup;
        }
 
        priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;
 
-       /*
-        * Must be after ipoib_ib_dev_init so we can allocate a per
-        * device wq there and use it here
-        */
-       if (ipoib_neigh_hash_init(priv) < 0)
-               goto out_dev_uninit;
-
        return 0;
 
-out_dev_uninit:
-       ipoib_ib_dev_cleanup(dev);
-
 out_tx_ring_cleanup:
        vfree(priv->tx_ring);
 
 out_rx_ring_cleanup:
        kfree(priv->rx_ring);
 
+out_neigh_hash_cleanup:
+       ipoib_neigh_hash_uninit(dev);
 out:
        return -ENOMEM;
 }
@@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
        }
        unregister_netdevice_many(&head);
 
-       /*
-        * Must be before ipoib_ib_dev_cleanup or we delete an in use
-        * work queue
-        */
-       ipoib_neigh_hash_uninit(dev);
-
        ipoib_ib_dev_cleanup(dev);
 
        kfree(priv->rx_ring);
@@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
+
+       ipoib_neigh_hash_uninit(dev);
 }
 
 static const struct header_ops ipoib_header_ops = {
@@ -1646,7 +1636,7 @@ register_failed:
        /* Stop GC if started before flush */
        set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
        cancel_delayed_work(&priv->neigh_reap_task);
-       flush_workqueue(priv->wq);
+       flush_workqueue(ipoib_workqueue);
 
 event_failed:
        ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device)
                /* Stop GC */
                set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
                cancel_delayed_work(&priv->neigh_reap_task);
-               flush_workqueue(priv->wq);
+               flush_workqueue(ipoib_workqueue);
 
                unregister_netdev(priv->dev);
                free_netdev(priv->dev);
@@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void)
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
-        *
-        * In addition, bringing one device up and another down at the
-        * same time can deadlock a single workqueue, so we have this
-        * global fallback workqueue, but we also attempt to open a
-        * per device workqueue each time we bring an interface up
         */
-       ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+       ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
index bc50dd0d0e4dad7790725b0414d807d42fe82493..ffb83b5f7e805e411f1506d66a53f8465b90c439 100644 (file)
@@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                spin_unlock_irq(&priv->lock);
                priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
                set_qkey = 1;
+
+               if (!ipoib_cm_admin_enabled(dev)) {
+                       rtnl_lock();
+                       dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+                       rtnl_unlock();
+               }
        }
 
        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status,
        struct ipoib_mcast *mcast = multicast->context;
        struct net_device *dev = mcast->dev;
 
-       /*
-        * We have to take the mutex to force mcast_sendonly_join to
-        * return from ib_sa_multicast_join and set mcast->mc to a
-        * valid value.  Otherwise we were racing with ourselves in
-        * that we might fail here, but get a valid return from
-        * ib_sa_multicast_join after we had cleared mcast->mc here,
-        * resulting in mis-matched joins and leaves and a deadlock
-        */
-       mutex_lock(&mcast_mutex);
-
        /* We trap for port events ourselves. */
        if (status == -ENETRESET)
-               goto out;
+               return 0;
 
        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
        if (status) {
                if (mcast->logcount++ < 20)
-                       ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
-                                       "join failed for %pI6, status %d\n",
+                       ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
                                        mcast->mcmember.mgid.raw, status);
 
                /* Flush out any queued packets */
@@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status,
                        dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
                }
                netif_tx_unlock_bh(dev);
+
+               /* Clear the busy flag so we try again */
+               status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
+                                           &mcast->flags);
        }
-out:
-       clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-       if (status)
-               mcast->mc = NULL;
-       complete(&mcast->done);
-       if (status == -ENETRESET)
-               status = 0;
-       mutex_unlock(&mcast_mutex);
        return status;
 }
 
@@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        int ret = 0;
 
        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
-               ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
-                               "multicast joins\n");
+               ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
                return -ENODEV;
        }
 
-       if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
-               ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
-                               "sendonly join\n");
+       if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+               ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
                return -EBUSY;
        }
 
@@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        rec.port_gid = priv->local_gid;
        rec.pkey     = cpu_to_be16(priv->pkey);
 
-       mutex_lock(&mcast_mutex);
-       init_completion(&mcast->done);
-       set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
                                         priv->port, &rec,
                                         IB_SA_MCMEMBER_REC_MGID        |
@@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        if (IS_ERR(mcast->mc)) {
                ret = PTR_ERR(mcast->mc);
                clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-               complete(&mcast->done);
-               ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
-                          "failed (ret = %d)\n", ret);
+               ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
+                          ret);
        } else {
-               ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
-                               "sendonly join\n", mcast->mcmember.mgid.raw);
+               ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
+                               mcast->mcmember.mgid.raw);
        }
-       mutex_unlock(&mcast_mutex);
 
        return ret;
 }
@@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
                                                   carrier_on_task);
        struct ib_port_attr attr;
 
+       /*
+        * Take rtnl_lock to avoid racing with ipoib_stop() and
+        * turning the carrier back on while a device is being
+        * removed.
+        */
        if (ib_query_port(priv->ca, priv->port, &attr) ||
            attr.state != IB_PORT_ACTIVE) {
                ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
                return;
        }
 
-       /*
-        * Take rtnl_lock to avoid racing with ipoib_stop() and
-        * turning the carrier back on while a device is being
-        * removed.  However, ipoib_stop() will attempt to flush
-        * the workqueue while holding the rtnl lock, so loop
-        * on trylock until either we get the lock or we see
-        * FLAG_ADMIN_UP go away as that signals that we are bailing
-        * and can safely ignore the carrier on work.
-        */
-       while (!rtnl_trylock()) {
-               if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-                       return;
-               else
-                       msleep(20);
-       }
-       if (!ipoib_cm_admin_enabled(priv->dev))
-               dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
+       rtnl_lock();
        netif_carrier_on(priv->dev);
        rtnl_unlock();
 }
@@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status,
        ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
                        mcast->mcmember.mgid.raw, status);
 
-       /*
-        * We have to take the mutex to force mcast_join to
-        * return from ib_sa_multicast_join and set mcast->mc to a
-        * valid value.  Otherwise we were racing with ourselves in
-        * that we might fail here, but get a valid return from
-        * ib_sa_multicast_join after we had cleared mcast->mc here,
-        * resulting in mis-matched joins and leaves and a deadlock
-        */
-       mutex_lock(&mcast_mutex);
-
        /* We trap for port events ourselves. */
-       if (status == -ENETRESET)
+       if (status == -ENETRESET) {
+               status = 0;
                goto out;
+       }
 
        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
        if (!status) {
                mcast->backoff = 1;
+               mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+                       queue_delayed_work(ipoib_workqueue,
+                                          &priv->mcast_task, 0);
+               mutex_unlock(&mcast_mutex);
 
                /*
-                * Defer carrier on work to priv->wq to avoid a
+                * Defer carrier on work to ipoib_workqueue to avoid a
                 * deadlock on rtnl_lock here.
                 */
                if (mcast == priv->broadcast)
-                       queue_work(priv->wq, &priv->carrier_on_task);
-       } else {
-               if (mcast->logcount++ < 20) {
-                       if (status == -ETIMEDOUT || status == -EAGAIN) {
-                               ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
-                                               mcast->mcmember.mgid.raw, status);
-                       } else {
-                               ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
-                                          mcast->mcmember.mgid.raw, status);
-                       }
-               }
+                       queue_work(ipoib_workqueue, &priv->carrier_on_task);
 
-               mcast->backoff *= 2;
-               if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
-                       mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+               status = 0;
+               goto out;
        }
-out:
+
+       if (mcast->logcount++ < 20) {
+               if (status == -ETIMEDOUT || status == -EAGAIN) {
+                       ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+                                       mcast->mcmember.mgid.raw, status);
+               } else {
+                       ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+                                  mcast->mcmember.mgid.raw, status);
+               }
+       }
+
+       mcast->backoff *= 2;
+       if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+               mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+
+       /* Clear the busy flag so we try again */
+       status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
+       mutex_lock(&mcast_mutex);
        spin_lock_irq(&priv->lock);
-       clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-       if (status)
-               mcast->mc = NULL;
-       complete(&mcast->done);
-       if (status == -ENETRESET)
-               status = 0;
-       if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->mcast_task,
+       if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
+               queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                   mcast->backoff * HZ);
        spin_unlock_irq(&priv->lock);
        mutex_unlock(&mcast_mutex);
-
+out:
+       complete(&mcast->done);
        return status;
 }
 
@@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                rec.hop_limit     = priv->broadcast->mcmember.hop_limit;
        }
 
-       mutex_lock(&mcast_mutex);
-       init_completion(&mcast->done);
        set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+       init_completion(&mcast->done);
+       set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
+
        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
                                         &rec, comp_mask, GFP_KERNEL,
                                         ipoib_mcast_join_complete, mcast);
@@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
                        mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
 
+               mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_delayed_work(priv->wq, &priv->mcast_task,
+                       queue_delayed_work(ipoib_workqueue,
+                                          &priv->mcast_task,
                                           mcast->backoff * HZ);
+               mutex_unlock(&mcast_mutex);
        }
-       mutex_unlock(&mcast_mutex);
 }
 
 void ipoib_mcast_join_task(struct work_struct *work)
@@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
                        ipoib_warn(priv, "failed to allocate broadcast group\n");
                        mutex_lock(&mcast_mutex);
                        if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                               queue_delayed_work(priv->wq, &priv->mcast_task,
-                                                  HZ);
+                               queue_delayed_work(ipoib_workqueue,
+                                                  &priv->mcast_task, HZ);
                        mutex_unlock(&mcast_mutex);
                        return;
                }
@@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
        }
 
        if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-               if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
-                   !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+               if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
                        ipoib_mcast_join(dev, priv->broadcast, 0);
                return;
        }
@@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work)
        while (1) {
                struct ipoib_mcast *mcast = NULL;
 
-               /*
-                * Need the mutex so our flags are consistent, need the
-                * priv->lock so we don't race with list removals in either
-                * mcast_dev_flush or mcast_restart_task
-                */
-               mutex_lock(&mcast_mutex);
                spin_lock_irq(&priv->lock);
                list_for_each_entry(mcast, &priv->multicast_list, list) {
-                       if (IS_ERR_OR_NULL(mcast->mc) &&
-                           !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
-                           !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+                       if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
+                           && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
+                           && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                                /* Found the next unjoined group */
                                break;
                        }
                }
                spin_unlock_irq(&priv->lock);
-               mutex_unlock(&mcast_mutex);
 
                if (&mcast->list == &priv->multicast_list) {
                        /* All done */
                        break;
                }
 
-               if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-                       ipoib_mcast_sendonly_join(mcast);
-               else
-                       ipoib_mcast_join(dev, mcast, 1);
+               ipoib_mcast_join(dev, mcast, 1);
                return;
        }
 
@@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
        mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+               queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);
 
        return 0;
 }
 
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
        cancel_delayed_work(&priv->mcast_task);
        mutex_unlock(&mcast_mutex);
 
-       flush_workqueue(priv->wq);
+       if (flush)
+               flush_workqueue(ipoib_workqueue);
 
        return 0;
 }
@@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
        int ret = 0;
 
        if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-               ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
-
-       if (!IS_ERR_OR_NULL(mcast->mc))
                ib_sa_free_multicast(mcast->mc);
 
        if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -725,8 +685,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
                memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
                __ipoib_mcast_add(dev, mcast);
                list_add_tail(&mcast->list, &priv->multicast_list);
-               if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_delayed_work(priv->wq, &priv->mcast_task, 0);
        }
 
        if (!mcast->ah) {
@@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
                if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
                        ipoib_dbg_mcast(priv, "no address vector, "
                                        "but multicast join already started\n");
+               else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+                       ipoib_mcast_sendonly_join(mcast);
 
                /*
                 * If lookup completes between here and out:, don't
@@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
+       /* seperate between the wait to the leave*/
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+               if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
                        wait_for_completion(&mcast->done);
 
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 
        ipoib_dbg_mcast(priv, "restarting multicast task\n");
 
+       ipoib_mcast_stop_thread(dev, 0);
+
        local_irq_save(flags);
        netif_addr_lock(dev);
        spin_lock(&priv->lock);
@@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
        netif_addr_unlock(dev);
        local_irq_restore(flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
-       list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-                       wait_for_completion(&mcast->done);
-
-       /*
-        * We have to cancel outside of the spinlock, but we have to
-        * take the rtnl lock or else we race with the removal of
-        * entries from the remove list in mcast_dev_flush as part
-        * of ipoib_stop().  We detect the drop of the ADMIN_UP flag
-        * to signal that we have hit this particular race, and we
-        * return since we know we don't need to do anything else
-        * anyway.
-        */
-       while (!rtnl_trylock()) {
-               if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-                       return;
-               else
-                       msleep(20);
-       }
+       /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(mcast->dev, mcast);
                ipoib_mcast_free(mcast);
        }
-       /*
-        * Restart our join task if needed
-        */
-       ipoib_mcast_start_thread(dev);
-       rtnl_unlock();
+
+       if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+               ipoib_mcast_start_thread(dev);
 }
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
index b72a753eb41dc3031608269c56434ed507b96f5f..c56d5d44c53b3f11725b6d6da220ea2c440fe496 100644 (file)
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        int ret, size;
        int i;
 
-       /*
-        * the various IPoIB tasks assume they will never race against
-        * themselves, so always use a single thread workqueue
-        */
-       priv->wq = create_singlethread_workqueue("ipoib_wq");
-       if (!priv->wq) {
-               printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
-               return -ENODEV;
-       }
-
        priv->pd = ib_alloc_pd(priv->ca);
        if (IS_ERR(priv->pd)) {
                printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
-               goto out_free_wq;
+               return -ENODEV;
        }
 
        priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ out_free_mr:
 
 out_free_pd:
        ib_dealloc_pd(priv->pd);
-
-out_free_wq:
-       destroy_workqueue(priv->wq);
-       priv->wq = NULL;
        return -ENODEV;
 }
 
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
 
        if (ib_dealloc_pd(priv->pd))
                ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
-       if (priv->wq) {
-               flush_workqueue(priv->wq);
-               destroy_workqueue(priv->wq);
-               priv->wq = NULL;
-       }
 }
 
 void ipoib_event(struct ib_event_handler *handler,
index 8afa28e4570ed099bb3fb9fc4b2d7e1c1a5ba9d6..18d4b2c8fe55092aa8e31c411faf051d968fb49d 100644 (file)
 #include <linux/cdev.h>
 #include "input-compat.h"
 
+enum evdev_clock_type {
+       EV_CLK_REAL = 0,
+       EV_CLK_MONO,
+       EV_CLK_BOOT,
+       EV_CLK_MAX
+};
+
 struct evdev {
        int open;
        struct input_handle handle;
@@ -49,12 +56,32 @@ struct evdev_client {
        struct fasync_struct *fasync;
        struct evdev *evdev;
        struct list_head node;
-       int clkid;
+       int clk_type;
        bool revoked;
        unsigned int bufsize;
        struct input_event buffer[];
 };
 
+static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
+{
+       switch (clkid) {
+
+       case CLOCK_REALTIME:
+               client->clk_type = EV_CLK_REAL;
+               break;
+       case CLOCK_MONOTONIC:
+               client->clk_type = EV_CLK_MONO;
+               break;
+       case CLOCK_BOOTTIME:
+               client->clk_type = EV_CLK_BOOT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /* flush queued events of type @type, caller must hold client->buffer_lock */
 static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
 {
@@ -108,8 +135,11 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
        struct input_event ev;
        ktime_t time;
 
-       time = (client->clkid == CLOCK_MONOTONIC) ?
-               ktime_get() : ktime_get_real();
+       time = client->clk_type == EV_CLK_REAL ?
+                       ktime_get_real() :
+                       client->clk_type == EV_CLK_MONO ?
+                               ktime_get() :
+                               ktime_get_boottime();
 
        ev.time = ktime_to_timeval(time);
        ev.type = EV_SYN;
@@ -159,7 +189,7 @@ static void __pass_event(struct evdev_client *client,
 
 static void evdev_pass_values(struct evdev_client *client,
                        const struct input_value *vals, unsigned int count,
-                       ktime_t mono, ktime_t real)
+                       ktime_t *ev_time)
 {
        struct evdev *evdev = client->evdev;
        const struct input_value *v;
@@ -169,8 +199,7 @@ static void evdev_pass_values(struct evdev_client *client,
        if (client->revoked)
                return;
 
-       event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
-                                     mono : real);
+       event.time = ktime_to_timeval(ev_time[client->clk_type]);
 
        /* Interrupts are disabled, just acquire the lock. */
        spin_lock(&client->buffer_lock);
@@ -198,21 +227,22 @@ static void evdev_events(struct input_handle *handle,
 {
        struct evdev *evdev = handle->private;
        struct evdev_client *client;
-       ktime_t time_mono, time_real;
+       ktime_t ev_time[EV_CLK_MAX];
 
-       time_mono = ktime_get();
-       time_real = ktime_mono_to_real(time_mono);
+       ev_time[EV_CLK_MONO] = ktime_get();
+       ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
+       ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
+                                                TK_OFFS_BOOT);
 
        rcu_read_lock();
 
        client = rcu_dereference(evdev->grab);
 
        if (client)
-               evdev_pass_values(client, vals, count, time_mono, time_real);
+               evdev_pass_values(client, vals, count, ev_time);
        else
                list_for_each_entry_rcu(client, &evdev->client_list, node)
-                       evdev_pass_values(client, vals, count,
-                                         time_mono, time_real);
+                       evdev_pass_values(client, vals, count, ev_time);
 
        rcu_read_unlock();
 }
@@ -877,10 +907,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
        case EVIOCSCLOCKID:
                if (copy_from_user(&i, p, sizeof(unsigned int)))
                        return -EFAULT;
-               if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME)
-                       return -EINVAL;
-               client->clkid = i;
-               return 0;
+
+               return evdev_set_clk_type(client, i);
 
        case EVIOCGKEYCODE:
                return evdev_handle_get_keycode(dev, p);
index 04217c2e345c0ddcaa8e5b1d2525dbb14b5854fe..213e3a1903ee1ddecf84a797ba19da6fa8d7b50c 100644 (file)
@@ -1974,18 +1974,22 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
 
        events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
 
-       for (i = 0; i < ABS_CNT; i++) {
-               if (test_bit(i, dev->absbit)) {
-                       if (input_is_mt_axis(i))
-                               events += mt_slots;
-                       else
-                               events++;
+       if (test_bit(EV_ABS, dev->evbit)) {
+               for (i = 0; i < ABS_CNT; i++) {
+                       if (test_bit(i, dev->absbit)) {
+                               if (input_is_mt_axis(i))
+                                       events += mt_slots;
+                               else
+                                       events++;
+                       }
                }
        }
 
-       for (i = 0; i < REL_CNT; i++)
-               if (test_bit(i, dev->relbit))
-                       events++;
+       if (test_bit(EV_REL, dev->evbit)) {
+               for (i = 0; i < REL_CNT; i++)
+                       if (test_bit(i, dev->relbit))
+                               events++;
+       }
 
        /* Make room for KEY and MSC events */
        events += 7;
index 96ee26c555e02dd2b69530382170ae3e365facc4..a5d9b3f3c8714ee5a307e166afa4bf90e23864bf 100644 (file)
@@ -559,6 +559,7 @@ config KEYBOARD_SH_KEYSC
 config KEYBOARD_STMPE
        tristate "STMPE keypad support"
        depends on MFD_STMPE
+       depends on OF
        select INPUT_MATRIXKMAP
        help
          Say Y here if you want to use the keypad controller on STMPE I/O
index d4dd78a7d56b5b1bf3dbe8865025bd5559a82ce2..883d6aed5b9ac12f47bc3a137bbf8e61c0bd1ec2 100644 (file)
 struct gpio_button_data {
        const struct gpio_keys_button *button;
        struct input_dev *input;
-       struct timer_list timer;
-       struct work_struct work;
-       unsigned int timer_debounce;    /* in msecs */
+
+       struct timer_list release_timer;
+       unsigned int release_delay;     /* in msecs, for IRQ-only buttons */
+
+       struct delayed_work work;
+       unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
+
        unsigned int irq;
        spinlock_t lock;
        bool disabled;
@@ -116,11 +120,14 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
 {
        if (!bdata->disabled) {
                /*
-                * Disable IRQ and possible debouncing timer.
+                * Disable IRQ and associated timer/work structure.
                 */
                disable_irq(bdata->irq);
-               if (bdata->timer_debounce)
-                       del_timer_sync(&bdata->timer);
+
+               if (gpio_is_valid(bdata->button->gpio))
+                       cancel_delayed_work_sync(&bdata->work);
+               else
+                       del_timer_sync(&bdata->release_timer);
 
                bdata->disabled = true;
        }
@@ -343,7 +350,7 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
 static void gpio_keys_gpio_work_func(struct work_struct *work)
 {
        struct gpio_button_data *bdata =
-               container_of(work, struct gpio_button_data, work);
+               container_of(work, struct gpio_button_data, work.work);
 
        gpio_keys_gpio_report_event(bdata);
 
@@ -351,13 +358,6 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
                pm_relax(bdata->input->dev.parent);
 }
 
-static void gpio_keys_gpio_timer(unsigned long _data)
-{
-       struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
-
-       schedule_work(&bdata->work);
-}
-
 static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
 {
        struct gpio_button_data *bdata = dev_id;
@@ -366,11 +366,10 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
 
        if (bdata->button->wakeup)
                pm_stay_awake(bdata->input->dev.parent);
-       if (bdata->timer_debounce)
-               mod_timer(&bdata->timer,
-                       jiffies + msecs_to_jiffies(bdata->timer_debounce));
-       else
-               schedule_work(&bdata->work);
+
+       mod_delayed_work(system_wq,
+                        &bdata->work,
+                        msecs_to_jiffies(bdata->software_debounce));
 
        return IRQ_HANDLED;
 }
@@ -408,7 +407,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
                input_event(input, EV_KEY, button->code, 1);
                input_sync(input);
 
-               if (!bdata->timer_debounce) {
+               if (!bdata->release_delay) {
                        input_event(input, EV_KEY, button->code, 0);
                        input_sync(input);
                        goto out;
@@ -417,9 +416,9 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
                bdata->key_pressed = true;
        }
 
-       if (bdata->timer_debounce)
-               mod_timer(&bdata->timer,
-                       jiffies + msecs_to_jiffies(bdata->timer_debounce));
+       if (bdata->release_delay)
+               mod_timer(&bdata->release_timer,
+                       jiffies + msecs_to_jiffies(bdata->release_delay));
 out:
        spin_unlock_irqrestore(&bdata->lock, flags);
        return IRQ_HANDLED;
@@ -429,10 +428,10 @@ static void gpio_keys_quiesce_key(void *data)
 {
        struct gpio_button_data *bdata = data;
 
-       if (bdata->timer_debounce)
-               del_timer_sync(&bdata->timer);
-
-       cancel_work_sync(&bdata->work);
+       if (gpio_is_valid(bdata->button->gpio))
+               cancel_delayed_work_sync(&bdata->work);
+       else
+               del_timer_sync(&bdata->release_timer);
 }
 
 static int gpio_keys_setup_key(struct platform_device *pdev,
@@ -466,23 +465,25 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
                                        button->debounce_interval * 1000);
                        /* use timer if gpiolib doesn't provide debounce */
                        if (error < 0)
-                               bdata->timer_debounce =
+                               bdata->software_debounce =
                                                button->debounce_interval;
                }
 
-               irq = gpio_to_irq(button->gpio);
-               if (irq < 0) {
-                       error = irq;
-                       dev_err(dev,
-                               "Unable to get irq number for GPIO %d, error %d\n",
-                               button->gpio, error);
-                       return error;
+               if (button->irq) {
+                       bdata->irq = button->irq;
+               } else {
+                       irq = gpio_to_irq(button->gpio);
+                       if (irq < 0) {
+                               error = irq;
+                               dev_err(dev,
+                                       "Unable to get irq number for GPIO %d, error %d\n",
+                                       button->gpio, error);
+                               return error;
+                       }
+                       bdata->irq = irq;
                }
-               bdata->irq = irq;
 
-               INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
-               setup_timer(&bdata->timer,
-                           gpio_keys_gpio_timer, (unsigned long)bdata);
+               INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
 
                isr = gpio_keys_gpio_isr;
                irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@@ -499,8 +500,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
                        return -EINVAL;
                }
 
-               bdata->timer_debounce = button->debounce_interval;
-               setup_timer(&bdata->timer,
+               bdata->release_delay = button->debounce_interval;
+               setup_timer(&bdata->release_timer,
                            gpio_keys_irq_timer, (unsigned long)bdata);
 
                isr = gpio_keys_irq_isr;
@@ -510,7 +511,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
        input_set_capability(input, button->type ?: EV_KEY, button->code);
 
        /*
-        * Install custom action to cancel debounce timer and
+        * Install custom action to cancel release timer and
         * workqueue item.
         */
        error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
@@ -618,33 +619,30 @@ gpio_keys_get_devtree_pdata(struct device *dev)
 
        i = 0;
        for_each_child_of_node(node, pp) {
-               int gpio = -1;
                enum of_gpio_flags flags;
 
                button = &pdata->buttons[i++];
 
-               if (!of_find_property(pp, "gpios", NULL)) {
-                       button->irq = irq_of_parse_and_map(pp, 0);
-                       if (button->irq == 0) {
-                               i--;
-                               pdata->nbuttons--;
-                               dev_warn(dev, "Found button without gpios or irqs\n");
-                               continue;
-                       }
-               } else {
-                       gpio = of_get_gpio_flags(pp, 0, &flags);
-                       if (gpio < 0) {
-                               error = gpio;
+               button->gpio = of_get_gpio_flags(pp, 0, &flags);
+               if (button->gpio < 0) {
+                       error = button->gpio;
+                       if (error != -ENOENT) {
                                if (error != -EPROBE_DEFER)
                                        dev_err(dev,
                                                "Failed to get gpio flags, error: %d\n",
                                                error);
                                return ERR_PTR(error);
                        }
+               } else {
+                       button->active_low = flags & OF_GPIO_ACTIVE_LOW;
                }
 
-               button->gpio = gpio;
-               button->active_low = flags & OF_GPIO_ACTIVE_LOW;
+               button->irq = irq_of_parse_and_map(pp, 0);
+
+               if (!gpio_is_valid(button->gpio) && !button->irq) {
+                       dev_err(dev, "Found button without gpios or irqs\n");
+                       return ERR_PTR(-EINVAL);
+               }
 
                if (of_property_read_u32(pp, "linux,code", &button->code)) {
                        dev_err(dev, "Button without keycode: 0x%x\n",
@@ -659,6 +657,8 @@ gpio_keys_get_devtree_pdata(struct device *dev)
 
                button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
 
+               button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+
                if (of_property_read_u32(pp, "debounce-interval",
                                         &button->debounce_interval))
                        button->debounce_interval = 5;
index 610a8af795a1f5b090b73d7bb8919bf41d3e331a..5b152f25a8e1ff72e613608f08876b3098627469 100644 (file)
@@ -473,7 +473,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
        if (error)
                goto bail1;
 
-       init_completion(&dev->cmd_done);
+       reinit_completion(&dev->cmd_done);
        serio_write(serio, 0);
        serio_write(serio, 0);
        serio_write(serio, HIL_PKT_CMD >> 8);
@@ -482,7 +482,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
        if (error)
                goto bail1;
 
-       init_completion(&dev->cmd_done);
+       reinit_completion(&dev->cmd_done);
        serio_write(serio, 0);
        serio_write(serio, 0);
        serio_write(serio, HIL_PKT_CMD >> 8);
@@ -491,7 +491,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
        if (error)
                goto bail1;
 
-       init_completion(&dev->cmd_done);
+       reinit_completion(&dev->cmd_done);
        serio_write(serio, 0);
        serio_write(serio, 0);
        serio_write(serio, HIL_PKT_CMD >> 8);
index ef5e67fb567e701365767a9949dd3ea722e4f260..fe6e3f22eed76157c42a1d9b873b01e34f38b5f2 100644 (file)
 #define STMPE_KEYPAD_MAX_ROWS          8
 #define STMPE_KEYPAD_MAX_COLS          8
 #define STMPE_KEYPAD_ROW_SHIFT         3
-#define STMPE_KEYPAD_KEYMAP_SIZE       \
+#define STMPE_KEYPAD_KEYMAP_MAX_SIZE \
        (STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
 
 /**
  * struct stmpe_keypad_variant - model-specific attributes
  * @auto_increment: whether the KPC_DATA_BYTE register address
  *                 auto-increments on multiple read
+ * @set_pullup: whether the pins need to have their pull-ups set
  * @num_data: number of data bytes
  * @num_normal_data: number of normal keys' data bytes
  * @max_cols: maximum number of columns supported
@@ -61,6 +62,7 @@
  */
 struct stmpe_keypad_variant {
        bool            auto_increment;
+       bool            set_pullup;
        int             num_data;
        int             num_normal_data;
        int             max_cols;
@@ -81,6 +83,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
        },
        [STMPE2401] = {
                .auto_increment         = false,
+               .set_pullup             = true,
                .num_data               = 3,
                .num_normal_data        = 2,
                .max_cols               = 8,
@@ -90,6 +93,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
        },
        [STMPE2403] = {
                .auto_increment         = true,
+               .set_pullup             = true,
                .num_data               = 5,
                .num_normal_data        = 3,
                .max_cols               = 8,
@@ -99,16 +103,30 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
        },
 };
 
+/**
+ * struct stmpe_keypad - STMPE keypad state container
+ * @stmpe: pointer to parent STMPE device
+ * @input: spawned input device
+ * @variant: STMPE variant
+ * @debounce_ms: debounce interval, in ms.  Maximum is
+ *              %STMPE_KEYPAD_MAX_DEBOUNCE.
+ * @scan_count: number of key scanning cycles to confirm key data.
+ *             Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
+ * @no_autorepeat: disable key autorepeat
+ * @rows: bitmask for the rows
+ * @cols: bitmask for the columns
+ * @keymap: the keymap
+ */
 struct stmpe_keypad {
        struct stmpe *stmpe;
        struct input_dev *input;
        const struct stmpe_keypad_variant *variant;
-       const struct stmpe_keypad_platform_data *plat;
-
+       unsigned int debounce_ms;
+       unsigned int scan_count;
+       bool no_autorepeat;
        unsigned int rows;
        unsigned int cols;
-
-       unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
+       unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE];
 };
 
 static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
@@ -171,7 +189,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
        unsigned int col_gpios = variant->col_gpios;
        unsigned int row_gpios = variant->row_gpios;
        struct stmpe *stmpe = keypad->stmpe;
+       u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB];
        unsigned int pins = 0;
+       unsigned int pu_pins = 0;
+       int ret;
        int i;
 
        /*
@@ -188,8 +209,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
        for (i = 0; i < variant->max_cols; i++) {
                int num = __ffs(col_gpios);
 
-               if (keypad->cols & (1 << i))
+               if (keypad->cols & (1 << i)) {
                        pins |= 1 << num;
+                       pu_pins |= 1 << num;
+               }
 
                col_gpios &= ~(1 << num);
        }
@@ -203,20 +226,43 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
                row_gpios &= ~(1 << num);
        }
 
-       return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+       ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+       if (ret)
+               return ret;
+
+       /*
+        * On STMPE24xx, set pin bias to pull-up on all keypad input
+        * pins (columns), this incidentally happen to be maximum 8 pins
+        * and placed at GPIO0-7 so only the LSB of the pull up register
+        * ever needs to be written.
+        */
+       if (variant->set_pullup) {
+               u8 val;
+
+               ret = stmpe_reg_read(stmpe, pureg);
+               if (ret)
+                       return ret;
+
+               /* Do not touch unused pins, may be used for GPIO */
+               val = ret & ~pu_pins;
+               val |= pu_pins;
+
+               ret = stmpe_reg_write(stmpe, pureg, val);
+       }
+
+       return 0;
 }
 
 static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
 {
-       const struct stmpe_keypad_platform_data *plat = keypad->plat;
        const struct stmpe_keypad_variant *variant = keypad->variant;
        struct stmpe *stmpe = keypad->stmpe;
        int ret;
 
-       if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
+       if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
                return -EINVAL;
 
-       if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
+       if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
                return -EINVAL;
 
        ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
@@ -245,7 +291,7 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
 
        ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
                             STMPE_KPC_CTRL_MSB_SCAN_COUNT,
-                            plat->scan_count << 4);
+                            keypad->scan_count << 4);
        if (ret < 0)
                return ret;
 
@@ -253,17 +299,18 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
                              STMPE_KPC_CTRL_LSB_SCAN |
                              STMPE_KPC_CTRL_LSB_DEBOUNCE,
                              STMPE_KPC_CTRL_LSB_SCAN |
-                             (plat->debounce_ms << 1));
+                             (keypad->debounce_ms << 1));
 }
 
-static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
+static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad,
+                                       u32 used_rows, u32 used_cols)
 {
        int row, col;
 
-       for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
-               for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
+       for (row = 0; row < used_rows; row++) {
+               for (col = 0; col < used_cols; col++) {
                        int code = MATRIX_SCAN_CODE(row, col,
-                                               STMPE_KEYPAD_ROW_SHIFT);
+                                                   STMPE_KEYPAD_ROW_SHIFT);
                        if (keypad->keymap[code] != KEY_RESERVED) {
                                keypad->rows |= 1 << row;
                                keypad->cols |= 1 << col;
@@ -272,51 +319,17 @@ static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
        }
 }
 
-#ifdef CONFIG_OF
-static const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
-       struct device_node *np = dev->of_node;
-       struct stmpe_keypad_platform_data *plat;
-
-       if (!np)
-               return ERR_PTR(-ENODEV);
-
-       plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
-       if (!plat)
-               return ERR_PTR(-ENOMEM);
-
-       of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
-       of_property_read_u32(np, "st,scan-count", &plat->scan_count);
-
-       plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
-
-       return plat;
-}
-#else
-static inline const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
-       return ERR_PTR(-EINVAL);
-}
-#endif
-
 static int stmpe_keypad_probe(struct platform_device *pdev)
 {
        struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
-       const struct stmpe_keypad_platform_data *plat;
+       struct device_node *np = pdev->dev.of_node;
        struct stmpe_keypad *keypad;
        struct input_dev *input;
+       u32 rows;
+       u32 cols;
        int error;
        int irq;
 
-       plat = stmpe->pdata->keypad;
-       if (!plat) {
-               plat = stmpe_keypad_of_probe(&pdev->dev);
-               if (IS_ERR(plat))
-                       return PTR_ERR(plat);
-       }
-
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
@@ -326,6 +339,13 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
        if (!keypad)
                return -ENOMEM;
 
+       keypad->stmpe = stmpe;
+       keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
+
+       of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms);
+       of_property_read_u32(np, "st,scan-count", &keypad->scan_count);
+       keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
+
        input = devm_input_allocate_device(&pdev->dev);
        if (!input)
                return -ENOMEM;
@@ -334,23 +354,22 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
        input->id.bustype = BUS_I2C;
        input->dev.parent = &pdev->dev;
 
-       error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
-                                          STMPE_KEYPAD_MAX_ROWS,
-                                          STMPE_KEYPAD_MAX_COLS,
+       error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
+       if (error)
+               return error;
+
+       error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
                                           keypad->keymap, input);
        if (error)
                return error;
 
        input_set_capability(input, EV_MSC, MSC_SCAN);
-       if (!plat->no_autorepeat)
+       if (!keypad->no_autorepeat)
                __set_bit(EV_REP, input->evbit);
 
-       stmpe_keypad_fill_used_pins(keypad);
+       stmpe_keypad_fill_used_pins(keypad, rows, cols);
 
-       keypad->stmpe = stmpe;
-       keypad->plat = plat;
        keypad->input = input;
-       keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
 
        error = stmpe_keypad_chip_init(keypad);
        if (error < 0)
index d125a019383f10155dcafb88903f47e0f5297080..d88d73d835526a16d2e5e4e48c6a2562c802a4cf 100644 (file)
@@ -881,6 +881,34 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
                                          unsigned char *pkt,
                                          unsigned char pkt_id)
 {
+       /*
+        *       packet-fmt    b7   b6    b5   b4   b3   b2   b1   b0
+        * Byte0 TWO & MULTI    L    1     R    M    1 Y0-2 Y0-1 Y0-0
+        * Byte0 NEW            L    1  X1-5    1    1 Y0-2 Y0-1 Y0-0
+        * Byte1            Y0-10 Y0-9  Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
+        * Byte2            X0-11    1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
+        * Byte3            X1-11    1  X0-4 X0-3    1 X0-2 X0-1 X0-0
+        * Byte4 TWO        X1-10  TWO  X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
+        * Byte4 MULTI      X1-10  TWO  X1-9 X1-8 X1-7 X1-6 Y1-5    1
+        * Byte4 NEW        X1-10  TWO  X1-9 X1-8 X1-7 X1-6    0    0
+        * Byte5 TWO & NEW  Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
+        * Byte5 MULTI      Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6  F-1  F-0
+        * L:         Left button
+        * R / M:     Non-clickpads: Right / Middle button
+        *            Clickpads: When > 2 fingers are down, and some fingers
+        *            are in the button area, then the 2 coordinates reported
+        *            are for fingers outside the button area and these report
+        *            extra fingers being present in the right / left button
+        *            area. Note these fingers are not added to the F field!
+        *            so if a TWO packet is received and R = 1 then there are
+        *            3 fingers down, etc.
+        * TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
+        *            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+        *               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+        *               in NEW fmt
+        * F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
+        */
+
        mt[0].x = ((pkt[2] & 0x80) << 4);
        mt[0].x |= ((pkt[2] & 0x3F) << 5);
        mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -919,18 +947,21 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
 
 static int alps_get_mt_count(struct input_mt_pos *mt)
 {
-       int i;
+       int i, fingers = 0;
 
-       for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++)
-               /* empty */;
+       for (i = 0; i < MAX_TOUCHES; i++) {
+               if (mt[i].x != 0 || mt[i].y != 0)
+                       fingers++;
+       }
 
-       return i;
+       return fingers;
 }
 
 static int alps_decode_packet_v7(struct alps_fields *f,
                                  unsigned char *p,
                                  struct psmouse *psmouse)
 {
+       struct alps_data *priv = psmouse->private;
        unsigned char pkt_id;
 
        pkt_id = alps_get_packet_id_v7(p);
@@ -938,19 +969,52 @@ static int alps_decode_packet_v7(struct alps_fields *f,
                return 0;
        if (pkt_id == V7_PACKET_ID_UNKNOWN)
                return -1;
+       /*
+        * NEW packets are send to indicate a discontinuity in the finger
+        * coordinate reporting. Specifically a finger may have moved from
+        * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
+        * us.
+        *
+        * NEW packets have 3 problems:
+        * 1) They do not contain middle / right button info (on non clickpads)
+        *    this can be worked around by preserving the old button state
+        * 2) They do not contain an accurate fingercount, and they are
+        *    typically send when the number of fingers changes. We cannot use
+        *    the old finger count as that may mismatch with the amount of
+        *    touch coordinates we've available in the NEW packet
+        * 3) Their x data for the second touch is inaccurate leading to
+        *    a possible jump of the x coordinate by 16 units when the first
+        *    non NEW packet comes in
+        * Since problems 2 & 3 cannot be worked around, just ignore them.
+        */
+       if (pkt_id == V7_PACKET_ID_NEW)
+               return 1;
 
        alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
 
-       if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) {
-               f->left = (p[0] & 0x80) >> 7;
+       if (pkt_id == V7_PACKET_ID_TWO)
+               f->fingers = alps_get_mt_count(f->mt);
+       else /* pkt_id == V7_PACKET_ID_MULTI */
+               f->fingers = 3 + (p[5] & 0x03);
+
+       f->left = (p[0] & 0x80) >> 7;
+       if (priv->flags & ALPS_BUTTONPAD) {
+               if (p[0] & 0x20)
+                       f->fingers++;
+               if (p[0] & 0x10)
+                       f->fingers++;
+       } else {
                f->right = (p[0] & 0x20) >> 5;
                f->middle = (p[0] & 0x10) >> 4;
        }
 
-       if (pkt_id == V7_PACKET_ID_TWO)
-               f->fingers = alps_get_mt_count(f->mt);
-       else if (pkt_id == V7_PACKET_ID_MULTI)
-               f->fingers = 3 + (p[5] & 0x03);
+       /* Sometimes a single touch is reported in mt[1] rather then mt[0] */
+       if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
+               f->mt[0].x = f->mt[1].x;
+               f->mt[0].y = f->mt[1].y;
+               f->mt[1].x = 0;
+               f->mt[1].y = 0;
+       }
 
        return 0;
 }
index f2b97802640755aacfcde04005b125717cb63818..6e22682c8255cfff41e1fc49f6a76198911aafc5 100644 (file)
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX31               0x361f00        20, 15, 0e      clickpad
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+ * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
                },
        },
+       {
+               /* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+               },
+       },
+       {
+               /* Fujitsu LIFEBOOK E544  does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+               },
+       },
 #endif
        { }
 };
@@ -1520,6 +1536,8 @@ static int elantech_set_properties(struct elantech_data *etd)
                case 7:
                case 8:
                case 9:
+               case 10:
+               case 13:
                        etd->hw_version = 4;
                        break;
                default:
index f9472920d986368f7aa83eb7d0621489d774b050..23e26e0768b54af037990dbe4999d0486470f56c 100644 (file)
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                1232, 5710, 1156, 4696
        },
        {
-               (const char * const []){"LEN0034", "LEN0036", "LEN0039",
-                                       "LEN2002", "LEN2004", NULL},
+               (const char * const []){"LEN0034", "LEN0036", "LEN0037",
+                                       "LEN0039", "LEN2002", "LEN2004",
+                                       NULL},
                1024, 5112, 2024, 4832
        },
        {
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
        "LEN0035", /* X240 */
        "LEN0036", /* T440 */
-       "LEN0037",
+       "LEN0037", /* X1 Carbon 2nd */
        "LEN0038",
        "LEN0039", /* T440s */
        "LEN0041",
index 30c8b6998808fa452a19e437c33bd7e9db8a0888..354d47ecd66a01c8b0ab732eb038bbad19857d19 100644 (file)
@@ -227,6 +227,7 @@ TRACKPOINT_INT_ATTR(thresh, TP_THRESH, TP_DEF_THRESH);
 TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH);
 TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
 TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
+TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
 
 TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
                    TP_DEF_PTSON);
@@ -246,6 +247,7 @@ static struct attribute *trackpoint_attrs[] = {
        &psmouse_attr_upthresh.dattr.attr,
        &psmouse_attr_ztime.dattr.attr,
        &psmouse_attr_jenks.dattr.attr,
+       &psmouse_attr_drift_time.dattr.attr,
        &psmouse_attr_press_to_select.dattr.attr,
        &psmouse_attr_skipback.dattr.attr,
        &psmouse_attr_ext_dev.dattr.attr,
@@ -312,6 +314,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
        TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh);
        TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime);
        TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks);
+       TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time);
 
        /* toggles */
        TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select);
@@ -332,6 +335,7 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
        TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh);
        TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime);
        TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks);
+       TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time);
        TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia);
 
        /* toggles */
index ecd0547964a570048a2ad9ecf228d9eda98290a3..5617ed3a7d7a15d0f9769086afb8c740b5739fcc 100644 (file)
@@ -70,6 +70,9 @@
 #define TP_UP_THRESH           0x5A    /* Used to generate a 'click' on Z-axis */
 #define TP_Z_TIME              0x5E    /* How sharp of a press */
 #define TP_JENKS_CURV          0x5D    /* Minimum curvature for double click */
+#define TP_DRIFT_TIME          0x5F    /* How long a 'hands off' condition */
+                                       /* must last (x*107ms) for drift */
+                                       /* correction to occur */
 
 /*
  * Toggling Flag bits
 #define TP_DEF_UP_THRESH       0xFF
 #define TP_DEF_Z_TIME          0x26
 #define TP_DEF_JENKS_CURV      0x87
+#define TP_DEF_DRIFT_TIME      0x05
 
 /* Toggles */
 #define TP_DEF_MB              0x00
@@ -137,6 +141,7 @@ struct trackpoint_data
        unsigned char draghys, mindrag;
        unsigned char thresh, upthresh;
        unsigned char ztime, jenks;
+       unsigned char drift_time;
 
        /* toggles */
        unsigned char press_to_select;
index c66d1b53843e326a246aadd9050cd089c69e112a..c11556563ef0633f746edc9bc9e3ebca80879e8b 100644 (file)
@@ -151,6 +151,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
                },
        },
+       {
+               /* Medion Akoya E7225 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+               },
+       },
        {
                /* Blue FB5601 */
                .matches = {
@@ -414,6 +422,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
                },
        },
+       {
+               /* Acer Aspire 7738 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+               },
+       },
        {
                /* Gericom Bellagio */
                .matches = {
@@ -745,6 +760,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
        { }
 };
 
+/*
+ * Some laptops need keyboard reset before probing for the trackpad to get
+ * it detected, initialised & finally work.
+ */
+static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+       {
+               /* Gigabyte P35 v2 - Elantech touchpad */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+               },
+       },
+               {
+               /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+               },
+       },
+       {
+               /* Gigabyte P34 - Elantech touchpad */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+               },
+       },
+       { }
+};
+
 #endif /* CONFIG_X86 */
 
 #ifdef CONFIG_PNP
@@ -1040,6 +1084,9 @@ static int __init i8042_platform_init(void)
        if (dmi_check_system(i8042_dmi_dritek_table))
                i8042_dritek = true;
 
+       if (dmi_check_system(i8042_dmi_kbdreset_table))
+               i8042_kbdreset = true;
+
        /*
         * A20 was already enabled during early kernel init. But some buggy
         * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
index 924e4bf357fb2607bb14fdf32173cc049830c3ab..986a71c614b0461bce7825ba34fc78aefcd4a7e4 100644 (file)
@@ -67,6 +67,10 @@ static bool i8042_notimeout;
 module_param_named(notimeout, i8042_notimeout, bool, 0);
 MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
 
+static bool i8042_kbdreset;
+module_param_named(kbdreset, i8042_kbdreset, bool, 0);
+MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
+
 #ifdef CONFIG_X86
 static bool i8042_dritek;
 module_param_named(dritek, i8042_dritek, bool, 0);
@@ -789,6 +793,16 @@ static int __init i8042_check_aux(void)
        if (i8042_toggle_aux(true))
                return -1;
 
+/*
+ * Reset keyboard (needed on some laptops to successfully detect
+ * touchpad, e.g., some Gigabyte laptop models with Elantech
+ * touchpads).
+ */
+       if (i8042_kbdreset) {
+               pr_warn("Attempting to reset device connected to KBD port\n");
+               i8042_kbd_write(NULL, (unsigned char) 0xff);
+       }
+
 /*
  * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
  * used it for a PCI card or somethig else.
index bb070206223c1e9eeee4f4964a8f9f6ed848806b..95ee92a91bd21353bf58020c9d87fda42fad6633 100644 (file)
 #define MXT_T6_STATUS_COMSERR  (1 << 2)
 
 /* MXT_GEN_POWER_T7 field */
-struct t7_config {
-       u8 idle;
-       u8 active;
-} __packed;
-
-#define MXT_POWER_CFG_RUN              0
-#define MXT_POWER_CFG_DEEPSLEEP                1
+#define MXT_POWER_IDLEACQINT   0
+#define MXT_POWER_ACTVACQINT   1
+#define MXT_POWER_ACTV2IDLETO  2
 
 /* MXT_GEN_ACQUIRE_T8 field */
 #define MXT_ACQUIRE_CHRGTIME   0
@@ -117,6 +113,7 @@ struct t7_config {
 #define MXT_ACQUIRE_ATCHCALSTHR        7
 
 /* MXT_TOUCH_MULTI_T9 field */
+#define MXT_TOUCH_CTRL         0
 #define MXT_T9_ORIENT          9
 #define MXT_T9_RANGE           18
 
@@ -256,7 +253,6 @@ struct mxt_data {
        bool update_input;
        u8 last_message_count;
        u8 num_touchids;
-       struct t7_config t7_cfg;
 
        /* Cached parameters from object table */
        u16 T5_address;
@@ -672,6 +668,20 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
        data->t6_status = status;
 }
 
+static int mxt_write_object(struct mxt_data *data,
+                                u8 type, u8 offset, u8 val)
+{
+       struct mxt_object *object;
+       u16 reg;
+
+       object = mxt_get_object(data, type);
+       if (!object || offset >= mxt_obj_size(object))
+               return -EINVAL;
+
+       reg = object->start_address;
+       return mxt_write_reg(data->client, reg + offset, val);
+}
+
 static void mxt_input_button(struct mxt_data *data, u8 *message)
 {
        struct input_dev *input = data->input_dev;
@@ -1742,60 +1752,6 @@ err_free_object_table:
        return error;
 }
 
-static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
-{
-       struct device *dev = &data->client->dev;
-       int error;
-       struct t7_config *new_config;
-       struct t7_config deepsleep = { .active = 0, .idle = 0 };
-
-       if (sleep == MXT_POWER_CFG_DEEPSLEEP)
-               new_config = &deepsleep;
-       else
-               new_config = &data->t7_cfg;
-
-       error = __mxt_write_reg(data->client, data->T7_address,
-                               sizeof(data->t7_cfg), new_config);
-       if (error)
-               return error;
-
-       dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
-               new_config->active, new_config->idle);
-
-       return 0;
-}
-
-static int mxt_init_t7_power_cfg(struct mxt_data *data)
-{
-       struct device *dev = &data->client->dev;
-       int error;
-       bool retry = false;
-
-recheck:
-       error = __mxt_read_reg(data->client, data->T7_address,
-                               sizeof(data->t7_cfg), &data->t7_cfg);
-       if (error)
-               return error;
-
-       if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
-               if (!retry) {
-                       dev_dbg(dev, "T7 cfg zero, resetting\n");
-                       mxt_soft_reset(data);
-                       retry = true;
-                       goto recheck;
-               } else {
-                       dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
-                       data->t7_cfg.active = 20;
-                       data->t7_cfg.idle = 100;
-                       return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
-               }
-       }
-
-       dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
-               data->t7_cfg.active, data->t7_cfg.idle);
-       return 0;
-}
-
 static int mxt_configure_objects(struct mxt_data *data,
                                 const struct firmware *cfg)
 {
@@ -1809,12 +1765,6 @@ static int mxt_configure_objects(struct mxt_data *data,
                        dev_warn(dev, "Error %d updating config\n", error);
        }
 
-       error = mxt_init_t7_power_cfg(data);
-       if (error) {
-               dev_err(dev, "Failed to initialize power cfg\n");
-               return error;
-       }
-
        error = mxt_initialize_t9_input_device(data);
        if (error)
                return error;
@@ -2093,15 +2043,16 @@ static const struct attribute_group mxt_attr_group = {
 
 static void mxt_start(struct mxt_data *data)
 {
-       mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
-
-       /* Recalibrate since chip has been in deep sleep */
-       mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+       /* Touch enable */
+       mxt_write_object(data,
+                       MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
 }
 
 static void mxt_stop(struct mxt_data *data)
 {
-       mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+       /* Touch disable */
+       mxt_write_object(data,
+                       MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
 }
 
 static int mxt_input_open(struct input_dev *dev)
@@ -2266,6 +2217,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
        struct mxt_data *data = i2c_get_clientdata(client);
        struct input_dev *input_dev = data->input_dev;
 
+       mxt_soft_reset(data);
+
        mutex_lock(&input_dev->mutex);
 
        if (input_dev->users)
index 3793fcc7e5db31117404e819272da27f4fd15d90..d4c24fb7704f5e2d87f299ac01e2aa2ba5866521 100644 (file)
@@ -850,9 +850,11 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
 }
 
 #define EDT_ATTR_CHECKSET(name, reg) \
+do {                                                           \
        if (pdata->name >= edt_ft5x06_attr_##name.limit_low &&          \
            pdata->name <= edt_ft5x06_attr_##name.limit_high)           \
-               edt_ft5x06_register_write(tsdata, reg, pdata->name)
+               edt_ft5x06_register_write(tsdata, reg, pdata->name);    \
+} while (0)
 
 #define EDT_GET_PROP(name, reg) {                              \
        u32 val;                                                \
index 1232336b960edb163278d59510a56e51840df9b9..40dfbc0444c0eaccdeca8cebfcd5f567e3d84ae4 100644 (file)
@@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb,
        if (action != BUS_NOTIFY_REMOVED_DEVICE)
                return 0;
 
-       /*
-        * If the device is still attached to a device driver we can't
-        * tear down the domain yet as DMA mappings may still be in use.
-        * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
-        */
-       if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
-               return 0;
-
        domain = find_domain(dev);
        if (!domain)
                return 0;
@@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                                domain_remove_one_dev_info(old_domain, dev);
                        else
                                domain_remove_dev_info(old_domain);
+
+                       if (!domain_type_is_vm_or_si(old_domain) &&
+                            list_empty(&old_domain->devices))
+                               domain_exit(old_domain);
                }
        }
 
index 68dfb0fd5ee9af38f6586ea994ef4a6ba997b282..748693192c20a0dd862f799ace8d20450bc436fe 100644 (file)
@@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
 
 static u64 ipmmu_page_prot(unsigned int prot, u64 type)
 {
-       u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+       u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
                   | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
                   | ARM_VMSA_PTE_NS | type;
 
@@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type)
        if (prot & IOMMU_CACHE)
                pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
 
-       if (prot & IOMMU_EXEC)
-               pgprot &= ~ARM_VMSA_PTE_XN;
+       if (prot & IOMMU_NOEXEC)
+               pgprot |= ARM_VMSA_PTE_XN;
        else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
                /* If no access create a faulting entry to avoid TLB fills. */
                pgprot &= ~ARM_VMSA_PTE_PAGE;
index b2023af384b9be0852c04195f75cea93582d47a8..6a8b1ec4a48a1f1100bc0f9f301fc658758a35ca 100644 (file)
@@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = {
        .remove = rk_iommu_remove,
        .driver = {
                   .name = "rk_iommu",
-                  .owner = THIS_MODULE,
                   .of_match_table = of_match_ptr(rk_iommu_dt_ids),
        },
 };
index f722a0c466cfee07de8f982a9c223478d1765c25..c48da057dbb1e5b6be38e6b28daced1046454687 100644 (file)
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
        .attach_dev     = gart_iommu_attach_dev,
        .detach_dev     = gart_iommu_detach_dev,
        .map            = gart_iommu_map,
+       .map_sg         = default_iommu_map_sg,
        .unmap          = gart_iommu_unmap,
        .iova_to_phys   = gart_iommu_iova_to_phys,
        .pgsize_bitmap  = GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
        do_gart_setup(gart, NULL);
 
        gart_handle = gart;
-       bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
        return 0;
 }
 
index d111ac779c4058e94c6051536cdb7fb5d2d34863..63cd031b2c28d40c9c1296ae466005a5d48843d3 100644 (file)
@@ -28,7 +28,7 @@
 #define AT91_AIC_IRQ_MIN_PRIORITY      0
 #define AT91_AIC_IRQ_MAX_PRIORITY      7
 
-#define AT91_AIC_SRCTYPE               GENMASK(7, 6)
+#define AT91_AIC_SRCTYPE               GENMASK(6, 5)
 #define AT91_AIC_SRCTYPE_LOW           (0 << 5)
 #define AT91_AIC_SRCTYPE_FALLING       (1 << 5)
 #define AT91_AIC_SRCTYPE_HIGH          (2 << 5)
@@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
                return -EINVAL;
        }
 
-       *val &= AT91_AIC_SRCTYPE;
+       *val &= ~AT91_AIC_SRCTYPE;
        *val |= aic_type;
 
        return 0;
index 86e4684adeb12d8f8f493dfaa686310c3c0bc4de..d8996bdf0f61e95e45ee670e44e565d045fb9535 100644 (file)
@@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
         * of two entries. No, the architecture doesn't let you
         * express an ITT with a single entry.
         */
-       nr_ites = max(2, roundup_pow_of_two(nvecs));
+       nr_ites = max(2UL, roundup_pow_of_two(nvecs));
        sz = nr_ites * its->ite_size;
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
        itt = kmalloc(sz, GFP_KERNEL);
index 29b8f21b74d0a5868b7ee4034bee285c07bb332b..6bc2deb73d533b3a226f66c2ef17ed2aa3eaf3bb 100644 (file)
@@ -381,7 +381,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
         * It will be refined as each CPU probes its ID.
         */
        for (i = 0; i < NR_HIP04_CPU_IF; i++)
-               hip04_cpu_map[i] = 0xff;
+               hip04_cpu_map[i] = 0xffff;
 
        /*
         * Find out how many interrupts are supported.
index 7e342df6a62f58be6e5fcc6a60b71cc81ed15b9c..0b0d2c00a2df8dfda1000cbd92e7d5b1bad824ae 100644 (file)
@@ -137,9 +137,9 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
                return -ENOMEM;
 
        chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
-       if (!chip_data->intpol_base) {
+       if (IS_ERR(chip_data->intpol_base)) {
                pr_err("mtk_sysirq: unable to map sysirq register\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(chip_data->intpol_base);
                goto out_free;
        }
 
index 28718d3e8281032d422d2c9985d7ca450b7763a1..c03f140acbaebf9b29c55548833ffe60a7163907 100644 (file)
@@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node)
        return ret;
 }
 
-static int __init omap_init_irq_legacy(u32 base)
+static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
 {
        int j, irq_base;
 
@@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base)
                irq_base = 0;
        }
 
-       domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0,
+       domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
                        &irq_domain_simple_ops, NULL);
 
        omap_irq_soft_reset();
@@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
 {
        int ret;
 
-       if (node)
+       /*
+        * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
+        * depends is still not ready for linear IRQ domains; because of that
+        * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using
+        * linear IRQ Domain until that driver is finally fixed.
+        */
+       if (of_device_is_compatible(node, "ti,omap2-intc") ||
+                       of_device_is_compatible(node, "ti,omap3-intc")) {
+               struct resource res;
+
+               if (of_address_to_resource(node, 0, &res))
+                       return -ENOMEM;
+
+               base = res.start;
+               ret = omap_init_irq_legacy(base, node);
+       } else if (node) {
                ret = omap_init_irq_of(node);
-       else
-               ret = omap_init_irq_legacy(base);
+       } else {
+               ret = omap_init_irq_legacy(base, NULL);
+       }
 
        if (ret == 0)
                omap_irq_enable_protection();
index a82e542ffc21dd4dccf9d1b810ab9373984c1d0d..d7c286656a25721ec58fa16a5f3fdb277a0747a5 100644 (file)
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                                        add_ai(plci, &parms[5]);
                                        sig_req(plci, REJECT, 0);
                                }
-                               else if (Reject == 1 || Reject > 9)
+                               else if (Reject == 1 || Reject >= 9)
                                {
                                        add_ai(plci, &parms[5]);
                                        sig_req(plci, HANGUP, 0);
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
        byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
        byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
        byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
-       byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
+       byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
        byte force_mt_info = false;
        byte dir;
        dword d;
index ccd7d851be26d27913a26656cdef1fc838c90870..a77eea594b695c59c96903bba4be80063eec1ad7 100644 (file)
@@ -754,10 +754,10 @@ dbusy_timer_handler(struct isac_hw *isac)
 }
 
 static int
-open_dchannel(struct isac_hw *isac, struct channel_req *rq)
+open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
 {
        pr_debug("%s: %s dev(%d) open from %p\n", isac->name, __func__,
-                isac->dch.dev.id, __builtin_return_address(1));
+                isac->dch.dev.id, caller);
        if (rq->protocol != ISDN_P_TE_S0)
                return -EINVAL;
        if (rq->adr.channel == 1)
@@ -771,6 +771,12 @@ open_dchannel(struct isac_hw *isac, struct channel_req *rq)
        return 0;
 }
 
+static int
+open_dchannel(struct isac_hw *isac, struct channel_req *rq)
+{
+       return open_dchannel_caller(isac, rq, __builtin_return_address(0));
+}
+
 static const char *ISACVer[] =
 {"2086/2186 V1.1", "2085 B1", "2085 B2",
  "2085 V2.3"};
@@ -1548,7 +1554,7 @@ ipac_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
        case OPEN_CHANNEL:
                rq = arg;
                if (rq->protocol == ISDN_P_TE_S0)
-                       err = open_dchannel(isac, rq);
+                       err = open_dchannel_caller(isac, rq, __builtin_return_address(0));
                else
                        err = open_bchannel(ipac, rq);
                if (err)
index de69f6828c767a402fc8a63825a8d2bac8f8a172..741675525b5338e81a66f7e731aa7201b2dfe568 100644 (file)
@@ -1176,10 +1176,10 @@ w6692_l1callback(struct dchannel *dch, u32 cmd)
 }
 
 static int
-open_dchannel(struct w6692_hw *card, struct channel_req *rq)
+open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
 {
        pr_debug("%s: %s dev(%d) open from %p\n", card->name, __func__,
-                card->dch.dev.id, __builtin_return_address(1));
+                card->dch.dev.id, caller);
        if (rq->protocol != ISDN_P_TE_S0)
                return -EINVAL;
        if (rq->adr.channel == 1)
@@ -1207,7 +1207,7 @@ w6692_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
        case OPEN_CHANNEL:
                rq = arg;
                if (rq->protocol == ISDN_P_TE_S0)
-                       err = open_dchannel(card, rq);
+                       err = open_dchannel(card, rq, __builtin_return_address(0));
                else
                        err = open_bchannel(card, rq);
                if (err)
index 5a4da94aefb056becbca88e5110282608b26df0c..ef9c8e4f1fa2900fc00ecc9adecedb7dd07a1cee 100644 (file)
@@ -59,7 +59,8 @@ isdnloop_bchan_send(isdnloop_card *card, int ch)
        isdn_ctrl cmd;
 
        while (card->sndcount[ch]) {
-               if ((skb = skb_dequeue(&card->bqueue[ch]))) {
+               skb = skb_dequeue(&card->bqueue[ch]);
+               if (skb) {
                        len = skb->len;
                        card->sndcount[ch] -= len;
                        ack = *(skb->head); /* used as scratch area */
@@ -149,8 +150,7 @@ typedef struct isdnloop_stat {
        int action;
 } isdnloop_stat;
 /* *INDENT-OFF* */
-static isdnloop_stat isdnloop_stat_table[] =
-{
+static isdnloop_stat isdnloop_stat_table[] = {
        {"BCON_",          ISDN_STAT_BCONN, 1}, /* B-Channel connected        */
        {"BDIS_",          ISDN_STAT_BHUP,  2}, /* B-Channel disconnected     */
        {"DCON_",          ISDN_STAT_DCONN, 0}, /* D-Channel connected        */
@@ -317,7 +317,8 @@ isdnloop_polldchan(unsigned long data)
        u_char *p;
        isdn_ctrl cmd;
 
-       if ((skb = skb_dequeue(&card->dqueue)))
+       skb = skb_dequeue(&card->dqueue);
+       if (skb)
                avail = skb->len;
        else
                avail = 0;
@@ -471,8 +472,8 @@ isdnloop_fake(isdnloop_card *card, char *s, int ch)
 {
        struct sk_buff *skb;
        int len = strlen(s) + ((ch >= 0) ? 3 : 0);
-
-       if (!(skb = dev_alloc_skb(len))) {
+       skb = dev_alloc_skb(len);
+       if (!skb) {
                printk(KERN_WARNING "isdnloop: Out of memory in isdnloop_fake\n");
                return 1;
        }
@@ -483,8 +484,7 @@ isdnloop_fake(isdnloop_card *card, char *s, int ch)
        return 0;
 }
 /* *INDENT-OFF* */
-static isdnloop_stat isdnloop_cmd_table[] =
-{
+static isdnloop_stat isdnloop_cmd_table[] = {
        {"BCON_R",         0,  1},      /* B-Channel connect        */
        {"BCON_I",         0, 17},      /* B-Channel connect ind    */
        {"BDIS_R",         0,  2},      /* B-Channel disconnect     */
@@ -525,10 +525,8 @@ isdnloop_fake_err(isdnloop_card *card)
        isdnloop_fake(card, "NAK", -1);
 }
 
-static u_char ctable_eu[] =
-{0x00, 0x11, 0x01, 0x12};
-static u_char ctable_1t[] =
-{0x00, 0x3b, 0x01, 0x3a};
+static u_char ctable_eu[] = {0x00, 0x11, 0x01, 0x12};
+static u_char ctable_1t[] = {0x00, 0x3b, 0x01, 0x3a};
 
 /*
  * Assemble a simplified cause message depending on the
@@ -554,9 +552,9 @@ isdnloop_unicause(isdnloop_card *card, int loc, int cau)
                sprintf(buf, "%02X44", ctable_1t[cau]);
                break;
        default:
-               return ("0000");
+               return "0000";
        }
-       return (buf);
+       return buf;
 }
 
 /*
@@ -647,10 +645,8 @@ isdnloop_kill_ctimer(isdnloop_card *card, int ch)
        spin_unlock_irqrestore(&card->isdnloop_lock, flags);
 }
 
-static u_char si2bit[] =
-{0, 1, 0, 0, 0, 2, 0, 4, 0, 0};
-static u_char bit2si[] =
-{1, 5, 7};
+static u_char si2bit[] = {0, 1, 0, 0, 0, 2, 0, 4, 0, 0};
+static u_char bit2si[] = {1, 5, 7};
 
 /*
  * Try finding a listener for an outgoing call.
@@ -754,17 +750,17 @@ isdnloop_vstphone(isdnloop_card *card, char *phone, int caller)
                if (caller) {
                        for (i = 0; i < 2; i++)
                                if (!(strcmp(card->s0num[i], phone)))
-                                       return (phone);
-                       return (card->s0num[0]);
+                                       return phone;
+                       return card->s0num[0];
                }
-               return (phone);
+               return phone;
                break;
        case ISDN_PTYPE_1TR6:
                if (caller) {
                        sprintf(nphone, "%s%c", card->s0num[0], phone[0]);
-                       return (nphone);
+                       return nphone;
                } else
-                       return (&phone[strlen(phone) - 1]);
+                       return &phone[strlen(phone) - 1];
                break;
        }
        return "";
@@ -1148,14 +1144,14 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
                case ISDNLOOP_IOCTL_STARTUP:
                        if (!access_ok(VERIFY_READ, (void *) a, sizeof(isdnloop_sdef)))
                                return -EFAULT;
-                       return (isdnloop_start(card, (isdnloop_sdef *) a));
+                       return isdnloop_start(card, (isdnloop_sdef *) a);
                        break;
                case ISDNLOOP_IOCTL_ADDCARD:
                        if (copy_from_user((char *)&cdef,
                                           (char *)a,
                                           sizeof(cdef)))
                                return -EFAULT;
-                       return (isdnloop_addcard(cdef.id1));
+                       return isdnloop_addcard(cdef.id1);
                        break;
                case ISDNLOOP_IOCTL_LEASEDCFG:
                        if (a) {
@@ -1377,7 +1373,7 @@ if_command(isdn_ctrl *c)
        isdnloop_card *card = isdnloop_findcard(c->driver);
 
        if (card)
-               return (isdnloop_command(c, card));
+               return isdnloop_command(c, card);
        printk(KERN_ERR
               "isdnloop: if_command called with invalid driverId!\n");
        return -ENODEV;
@@ -1391,7 +1387,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
        if (card) {
                if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
                        return -ENODEV;
-               return (isdnloop_writecmd(buf, len, 1, card));
+               return isdnloop_writecmd(buf, len, 1, card);
        }
        printk(KERN_ERR
               "isdnloop: if_writecmd called with invalid driverId!\n");
@@ -1406,7 +1402,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
        if (card) {
                if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
                        return -ENODEV;
-               return (isdnloop_readstatus(buf, len, card));
+               return isdnloop_readstatus(buf, len, card);
        }
        printk(KERN_ERR
               "isdnloop: if_readstatus called with invalid driverId!\n");
@@ -1423,7 +1419,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
                        return -ENODEV;
                /* ack request stored in skb scratch area */
                *(skb->head) = ack;
-               return (isdnloop_sendbuf(channel, skb, card));
+               return isdnloop_sendbuf(channel, skb, card);
        }
        printk(KERN_ERR
               "isdnloop: if_sendbuf called with invalid driverId!\n");
@@ -1439,8 +1435,8 @@ isdnloop_initcard(char *id)
 {
        isdnloop_card *card;
        int i;
-
-       if (!(card = kzalloc(sizeof(isdnloop_card), GFP_KERNEL))) {
+       card = kzalloc(sizeof(isdnloop_card), GFP_KERNEL);
+       if (!card) {
                printk(KERN_WARNING
                       "isdnloop: (%s) Could not allocate card-struct.\n", id);
                return (isdnloop_card *) 0;
@@ -1489,8 +1485,8 @@ static int
 isdnloop_addcard(char *id1)
 {
        isdnloop_card *card;
-
-       if (!(card = isdnloop_initcard(id1))) {
+       card = isdnloop_initcard(id1);
+       if (!card) {
                return -EIO;
        }
        printk(KERN_INFO
@@ -1503,7 +1499,7 @@ static int __init
 isdnloop_init(void)
 {
        if (isdnloop_id)
-               return (isdnloop_addcard(isdnloop_id));
+               return isdnloop_addcard(isdnloop_id);
 
        return 0;
 }
index d6f19b168e8a1a6101fd11dea5de140eed90dffb..3597ef47b28a4763a782886e605024b76523a3f7 100644 (file)
@@ -30,7 +30,7 @@ static const char *boardname[] = { "DataCommute/BRI", "DataCommute/PRI", "TeleCo
 static unsigned int io[] = {0, 0, 0, 0};
 static unsigned char irq[] = {0, 0, 0, 0};
 static unsigned long ram[] = {0, 0, 0, 0};
-static bool do_reset = 0;
+static bool do_reset;
 
 module_param_array(io, int, NULL, 0);
 module_param_array(irq, byte, NULL, 0);
@@ -104,13 +104,12 @@ static int __init sc_init(void)
                                         io[b] + 0x400 * EXP_PAGE0);
                                continue;
                        }
-               }
-               else {
+               } else {
                        /*
                         * Yes, probe for I/O Base
                         */
                        if (probe_exhasted) {
-                               pr_debug("All probe addresses exhasted, skipping\n");
+                               pr_debug("All probe addresses exhausted, skipping\n");
                                continue;
                        }
                        pr_debug("Probing for I/O...\n");
@@ -169,8 +168,7 @@ static int __init sc_init(void)
                                model = identify_board(ram[b], io[b]);
                                release_region(ram[b], SRAM_PAGESIZE);
                        }
-               }
-               else {
+               } else {
                        /*
                         * Yes, probe for free RAM and look for
                         * a signature and id the board model
@@ -187,7 +185,7 @@ static int __init sc_init(void)
                                                ram[b] = i;
                                                break;
                                        }
-                                       pr_debug("  Unidentifed or inaccessible\n");
+                                       pr_debug("  Unidentified or inaccessible\n");
                                        continue;
                                }
                                pr_debug("  request failed\n");
@@ -337,8 +335,7 @@ static int __init sc_init(void)
                sc_adapter[cinst]->interrupt = irq[b];
                if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler,
                                0, interface->id,
-                               (void *)(unsigned long) cinst))
-               {
+                               (void *)(unsigned long) cinst)) {
                        kfree(sc_adapter[cinst]->channel);
                        indicate_status(cinst, ISDN_STAT_UNLOAD, 0, NULL);      /* Fix me */
                        kfree(interface);
index 26515c27ea8cca18a2e5f5ac24f82a6158194182..25e419752a7b7c5719baa69bd114d57720a62f92 100644 (file)
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
        led_dat->sata = 0;
        led_dat->cdev.brightness = LED_OFF;
        led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
-       /*
-        * If available, expose the SATA activity blink capability through
-        * a "sata" sysfs attribute.
-        */
-       if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
-               led_dat->cdev.groups = netxbig_led_groups;
        led_dat->mode_addr = template->mode_addr;
        led_dat->mode_val = template->mode_val;
        led_dat->bright_addr = template->bright_addr;
        led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
        led_dat->timer = pdata->timer;
        led_dat->num_timer = pdata->num_timer;
+       /*
+        * If available, expose the SATA activity blink capability through
+        * a "sata" sysfs attribute.
+        */
+       if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
+               led_dat->cdev.groups = netxbig_led_groups;
 
        return led_classdev_register(&pdev->dev, &led_dat->cdev);
 }
index f956ef26c0ce2ddc1f81014b8ab5e0b658eb9977..fb7493dcfb79f409a8f483480f1c8d50bdba2278 100644 (file)
@@ -7,6 +7,7 @@
 #define PCI_DEVICE_ID_MEN_CHAMELEON    0x4d45
 #define CHAMELEON_FILENAME_LEN         12
 #define CHAMELEONV2_MAGIC              0xabce
+#define CHAM_HEADER_SIZE               0x200
 
 enum chameleon_descriptor_type {
        CHAMELEON_DTYPE_GENERAL = 0x0,
index b5918196564376b028e825ccc75131ae74b1cdd0..5e1bd5db02c8ee7f21e0de9b778d2d1bb17a2d6c 100644 (file)
@@ -17,6 +17,7 @@
 
 struct priv {
        struct mcb_bus *bus;
+       phys_addr_t mapbase;
        void __iomem *base;
 };
 
@@ -31,8 +32,8 @@ static int mcb_pci_get_irq(struct mcb_device *mdev)
 
 static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+       struct resource *res;
        struct priv *priv;
-       phys_addr_t mapbase;
        int ret;
        int num_cells;
        unsigned long flags;
@@ -47,19 +48,21 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return -ENODEV;
        }
 
-       mapbase = pci_resource_start(pdev, 0);
-       if (!mapbase) {
+       priv->mapbase = pci_resource_start(pdev, 0);
+       if (!priv->mapbase) {
                dev_err(&pdev->dev, "No PCI resource\n");
                goto err_start;
        }
 
-       ret = pci_request_region(pdev, 0, KBUILD_MODNAME);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to request PCI BARs\n");
+       res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE,
+                                KBUILD_MODNAME);
+       if (IS_ERR(res)) {
+               dev_err(&pdev->dev, "Failed to request PCI memory\n");
+               ret = PTR_ERR(res);
                goto err_start;
        }
 
-       priv->base = pci_iomap(pdev, 0, 0);
+       priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE);
        if (!priv->base) {
                dev_err(&pdev->dev, "Cannot ioremap\n");
                ret = -ENOMEM;
@@ -84,7 +87,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        priv->bus->get_irq = mcb_pci_get_irq;
 
-       ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
+       ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
        if (ret < 0)
                goto err_drvdata;
        num_cells = ret;
@@ -93,8 +96,10 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        mcb_bus_add_devices(priv->bus);
 
+       return 0;
+
 err_drvdata:
-       pci_iounmap(pdev, priv->base);
+       iounmap(priv->base);
 err_ioremap:
        pci_release_region(pdev, 0);
 err_start:
@@ -107,6 +112,10 @@ static void mcb_pci_remove(struct pci_dev *pdev)
        struct priv *priv = pci_get_drvdata(pdev);
 
        mcb_release_bus(priv->bus);
+
+       iounmap(priv->base);
+       release_region(priv->mapbase, CHAM_HEADER_SIZE);
+       pci_disable_device(pdev);
 }
 
 static const struct pci_device_id mcb_pci_tbl[] = {
index da3604e73e8abafbd8e127dbfc659360d23310e5..1695ee5f3ffc30b883c83c1926745ba22e48a7e1 100644 (file)
@@ -72,6 +72,19 @@ __acquires(bitmap->lock)
        /* this page has not been allocated yet */
 
        spin_unlock_irq(&bitmap->lock);
+       /* It is possible that this is being called inside a
+        * prepare_to_wait/finish_wait loop from raid5c:make_request().
+        * In general it is not permitted to sleep in that context as it
+        * can cause the loop to spin freely.
+        * That doesn't apply here as we can only reach this point
+        * once with any loop.
+        * When this function completes, either bp[page].map or
+        * bp[page].hijacked.  In either case, this function will
+        * abort before getting to this point again.  So there is
+        * no risk of a free-spin, and so it is safe to assert
+        * that sleeping here is allowed.
+        */
+       sched_annotate_sleep();
        mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
        spin_lock_irq(&bitmap->lock);
 
index 9fc616c2755ed752a50930e1095df8a7a34303f2..c1c010498a21b99a9bf730b0cd4277776abc159a 100644 (file)
@@ -94,6 +94,9 @@ struct cache_disk_superblock {
 } __packed;
 
 struct dm_cache_metadata {
+       atomic_t ref_count;
+       struct list_head list;
+
        struct block_device *bdev;
        struct dm_block_manager *bm;
        struct dm_space_map *metadata_sm;
@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
 
 /*----------------------------------------------------------------*/
 
-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
-                                                sector_t data_block_size,
-                                                bool may_format_device,
-                                                size_t policy_hint_size)
+static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+                                              sector_t data_block_size,
+                                              bool may_format_device,
+                                              size_t policy_hint_size)
 {
        int r;
        struct dm_cache_metadata *cmd;
@@ -680,9 +683,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd) {
                DMERR("could not allocate metadata struct");
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
+       atomic_set(&cmd->ref_count, 1);
        init_rwsem(&cmd->root_lock);
        cmd->bdev = bdev;
        cmd->data_block_size = data_block_size;
@@ -705,10 +709,96 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
        return cmd;
 }
 
+/*
+ * We keep a little list of ref counted metadata objects to prevent two
+ * different target instances creating separate bufio instances.  This is
+ * an issue if a table is reloaded before the suspend.
+ */
+static DEFINE_MUTEX(table_lock);
+static LIST_HEAD(table);
+
+static struct dm_cache_metadata *lookup(struct block_device *bdev)
+{
+       struct dm_cache_metadata *cmd;
+
+       list_for_each_entry(cmd, &table, list)
+               if (cmd->bdev == bdev) {
+                       atomic_inc(&cmd->ref_count);
+                       return cmd;
+               }
+
+       return NULL;
+}
+
+static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+                                               sector_t data_block_size,
+                                               bool may_format_device,
+                                               size_t policy_hint_size)
+{
+       struct dm_cache_metadata *cmd, *cmd2;
+
+       mutex_lock(&table_lock);
+       cmd = lookup(bdev);
+       mutex_unlock(&table_lock);
+
+       if (cmd)
+               return cmd;
+
+       cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+       if (!IS_ERR(cmd)) {
+               mutex_lock(&table_lock);
+               cmd2 = lookup(bdev);
+               if (cmd2) {
+                       mutex_unlock(&table_lock);
+                       __destroy_persistent_data_objects(cmd);
+                       kfree(cmd);
+                       return cmd2;
+               }
+               list_add(&cmd->list, &table);
+               mutex_unlock(&table_lock);
+       }
+
+       return cmd;
+}
+
+static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
+{
+       if (cmd->data_block_size != data_block_size) {
+               DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+                     (unsigned long long) data_block_size,
+                     (unsigned long long) cmd->data_block_size);
+               return false;
+       }
+
+       return true;
+}
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+                                                sector_t data_block_size,
+                                                bool may_format_device,
+                                                size_t policy_hint_size)
+{
+       struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+                                                      may_format_device, policy_hint_size);
+
+       if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
+               dm_cache_metadata_close(cmd);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return cmd;
+}
+
 void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
 {
-       __destroy_persistent_data_objects(cmd);
-       kfree(cmd);
+       if (atomic_dec_and_test(&cmd->ref_count)) {
+               mutex_lock(&table_lock);
+               list_del(&cmd->list);
+               mutex_unlock(&table_lock);
+
+               __destroy_persistent_data_objects(cmd);
+               kfree(cmd);
+       }
 }
 
 /*
index 1e96d7889f51eaa08b7d65b04c1a43e063931708..e1650539cc2f826d9efe7f878352570bcc31e101 100644 (file)
@@ -221,7 +221,13 @@ struct cache {
        struct list_head need_commit_migrations;
        sector_t migration_threshold;
        wait_queue_head_t migration_wait;
-       atomic_t nr_migrations;
+       atomic_t nr_allocated_migrations;
+
+       /*
+        * The number of in flight migrations that are performing
+        * background io. eg, promotion, writeback.
+        */
+       atomic_t nr_io_migrations;
 
        wait_queue_head_t quiescing_wait;
        atomic_t quiescing;
@@ -258,7 +264,6 @@ struct cache {
        struct dm_deferred_set *all_io_ds;
 
        mempool_t *migration_pool;
-       struct dm_cache_migration *next_migration;
 
        struct dm_cache_policy *policy;
        unsigned policy_nr_args;
@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
        dm_bio_prison_free_cell(cache->prison, cell);
 }
 
+static struct dm_cache_migration *alloc_migration(struct cache *cache)
+{
+       struct dm_cache_migration *mg;
+
+       mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+       if (mg) {
+               mg->cache = cache;
+               atomic_inc(&mg->cache->nr_allocated_migrations);
+       }
+
+       return mg;
+}
+
+static void free_migration(struct dm_cache_migration *mg)
+{
+       if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
+               wake_up(&mg->cache->migration_wait);
+
+       mempool_free(mg, mg->cache->migration_pool);
+}
+
 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
 {
        if (!p->mg) {
-               p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+               p->mg = alloc_migration(cache);
                if (!p->mg)
                        return -ENOMEM;
        }
@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
                free_prison_cell(cache, p->cell1);
 
        if (p->mg)
-               mempool_free(p->mg, cache->migration_pool);
+               free_migration(p->mg);
 }
 
 static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
@@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
  * Migration covers moving data from the origin device to the cache, or
  * vice versa.
  *--------------------------------------------------------------*/
-static void free_migration(struct dm_cache_migration *mg)
-{
-       mempool_free(mg, mg->cache->migration_pool);
-}
-
-static void inc_nr_migrations(struct cache *cache)
+static void inc_io_migrations(struct cache *cache)
 {
-       atomic_inc(&cache->nr_migrations);
+       atomic_inc(&cache->nr_io_migrations);
 }
 
-static void dec_nr_migrations(struct cache *cache)
+static void dec_io_migrations(struct cache *cache)
 {
-       atomic_dec(&cache->nr_migrations);
-
-       /*
-        * Wake the worker in case we're suspending the target.
-        */
-       wake_up(&cache->migration_wait);
+       atomic_dec(&cache->nr_io_migrations);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
        wake_worker(cache);
 }
 
-static void cleanup_migration(struct dm_cache_migration *mg)
+static void free_io_migration(struct dm_cache_migration *mg)
 {
-       struct cache *cache = mg->cache;
+       dec_io_migrations(mg->cache);
        free_migration(mg);
-       dec_nr_migrations(cache);
 }
 
 static void migration_failure(struct dm_cache_migration *mg)
@@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg)
                cell_defer(cache, mg->new_ocell, true);
        }
 
-       cleanup_migration(mg);
+       free_io_migration(mg);
 }
 
 static void migration_success_pre_commit(struct dm_cache_migration *mg)
@@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
        if (mg->writeback) {
                clear_dirty(cache, mg->old_oblock, mg->cblock);
                cell_defer(cache, mg->old_ocell, false);
-               cleanup_migration(mg);
+               free_io_migration(mg);
                return;
 
        } else if (mg->demote) {
@@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
                                             mg->old_oblock);
                        if (mg->promote)
                                cell_defer(cache, mg->new_ocell, true);
-                       cleanup_migration(mg);
+                       free_io_migration(mg);
                        return;
                }
        } else {
                if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
                        DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
                        policy_remove_mapping(cache->policy, mg->new_oblock);
-                       cleanup_migration(mg);
+                       free_io_migration(mg);
                        return;
                }
        }
@@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
                } else {
                        if (mg->invalidate)
                                policy_remove_mapping(cache->policy, mg->old_oblock);
-                       cleanup_migration(mg);
+                       free_io_migration(mg);
                }
 
        } else {
@@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
                        bio_endio(mg->new_ocell->holder, 0);
                        cell_defer(cache, mg->new_ocell, false);
                }
-               cleanup_migration(mg);
+               free_io_migration(mg);
        }
 }
 
@@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
        mg->new_ocell = cell;
        mg->start_jiffies = jiffies;
 
-       inc_nr_migrations(cache);
+       inc_io_migrations(cache);
        quiesce_migration(mg);
 }
 
@@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
        mg->new_ocell = NULL;
        mg->start_jiffies = jiffies;
 
-       inc_nr_migrations(cache);
+       inc_io_migrations(cache);
        quiesce_migration(mg);
 }
 
@@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
        mg->new_ocell = new_ocell;
        mg->start_jiffies = jiffies;
 
-       inc_nr_migrations(cache);
+       inc_io_migrations(cache);
        quiesce_migration(mg);
 }
 
@@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
        mg->new_ocell = NULL;
        mg->start_jiffies = jiffies;
 
-       inc_nr_migrations(cache);
+       inc_io_migrations(cache);
        quiesce_migration(mg);
 }
 
@@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
 
 static bool spare_migration_bandwidth(struct cache *cache)
 {
-       sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+       sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
                cache->sectors_per_block;
        return current_volume < cache->migration_threshold;
 }
@@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache)
 
 static void wait_for_migrations(struct cache *cache)
 {
-       wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+       wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
 }
 
 static void stop_worker(struct cache *cache)
@@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache)
 {
        unsigned i;
 
-       if (cache->next_migration)
-               mempool_free(cache->next_migration, cache->migration_pool);
-
        if (cache->migration_pool)
                mempool_destroy(cache->migration_pool);
 
@@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        INIT_LIST_HEAD(&cache->quiesced_migrations);
        INIT_LIST_HEAD(&cache->completed_migrations);
        INIT_LIST_HEAD(&cache->need_commit_migrations);
-       atomic_set(&cache->nr_migrations, 0);
+       atomic_set(&cache->nr_allocated_migrations, 0);
+       atomic_set(&cache->nr_io_migrations, 0);
        init_waitqueue_head(&cache->migration_wait);
 
        init_waitqueue_head(&cache->quiescing_wait);
@@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
                goto bad;
        }
 
-       cache->next_migration = NULL;
-
        cache->need_tick_bio = true;
        cache->sized = false;
        cache->invalidate = false;
index 493478989dbd4349b23716aa3dbdd92e0d1bc37f..07705ee181e3d2837c47954626276f9dea52cac0 100644 (file)
@@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
+       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+               DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+                     dm_device_name(pool->pool_md));
+               return -EINVAL;
+       }
+
        if (!strcasecmp(argv[0], "create_thin"))
                r = process_create_thin_mesg(argc, argv, pool);
 
index b98cd9d84435fe15ea1cb202850508f83b83204b..2caf5b374649afaecff37a5ab5153d7ccc5ec437 100644 (file)
@@ -206,6 +206,9 @@ struct mapped_device {
        /* zero-length flush that will be cloned and submitted to targets */
        struct bio flush_bio;
 
+       /* the number of internal suspends */
+       unsigned internal_suspend_count;
+
        struct dm_stats stats;
 };
 
@@ -2928,7 +2931,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
 {
        struct dm_table *map = NULL;
 
-       if (dm_suspended_internally_md(md))
+       if (md->internal_suspend_count++)
                return; /* nested internal suspend */
 
        if (dm_suspended_md(md)) {
@@ -2953,7 +2956,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
 
 static void __dm_internal_resume(struct mapped_device *md)
 {
-       if (!dm_suspended_internally_md(md))
+       BUG_ON(!md->internal_suspend_count);
+
+       if (--md->internal_suspend_count)
                return; /* resume from nested internal suspend */
 
        if (dm_suspended_md(md))
index c1b0d52bfcb0f7b014bbc48f4823e3b9b028dcce..b98765f6f77fd9f1e11ecdcd8809928e7b821716 100644 (file)
@@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf,
                                          (unsigned long long)sh->sector,
                                          rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
        }
+
+       if (rcw > disks && rmw > disks &&
+           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+               set_bit(STRIPE_DELAYED, &sh->state);
+
        /* now if nothing is locked, and if we have enough data,
         * we can start a write request
         */
index db99ca2613ba422d20c60afcbe34a4750a9876c0..06931f6fa26cc0003db9789440ebf9c5277759df 100644 (file)
@@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = {
                .portb          = CX23885_MPEG_DVB,
        },
        [CX23885_BOARD_HAUPPAUGE_HVR4400] = {
-               .name           = "Hauppauge WinTV-HVR4400",
+               .name           = "Hauppauge WinTV-HVR4400/HVR5500",
                .porta          = CX23885_ANALOG_VIDEO,
                .portb          = CX23885_MPEG_DVB,
                .portc          = CX23885_MPEG_DVB,
@@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = {
                .tuner_addr     = 0x60, /* 0xc0 >> 1 */
                .tuner_bus      = 1,
        },
+       [CX23885_BOARD_HAUPPAUGE_STARBURST] = {
+               .name           = "Hauppauge WinTV Starburst",
+               .portb          = CX23885_MPEG_DVB,
+       },
        [CX23885_BOARD_AVERMEDIA_HC81R] = {
                .name           = "AVerTV Hybrid Express Slim HC81R",
                .tuner_type     = TUNER_XC2028,
@@ -936,19 +940,19 @@ struct cx23885_subid cx23885_subids[] = {
        }, {
                .subvendor = 0x0070,
                .subdevice = 0xc108,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */
        }, {
                .subvendor = 0x0070,
                .subdevice = 0xc138,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
        }, {
                .subvendor = 0x0070,
                .subdevice = 0xc12a,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400,
+               .card      = CX23885_BOARD_HAUPPAUGE_STARBURST, /* Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */
        }, {
                .subvendor = 0x0070,
                .subdevice = 0xc1f8,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
        }, {
                .subvendor = 0x1461,
                .subdevice = 0xd939,
@@ -1545,8 +1549,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
                cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR4400:
+       case CX23885_BOARD_HAUPPAUGE_STARBURST:
                /* GPIO-8 tda10071 demod reset */
-               /* GPIO-9 si2165 demod reset */
+               /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500)*/
 
                /* Put the parts into reset and back */
                cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
@@ -1872,6 +1877,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
        case CX23885_BOARD_HAUPPAUGE_HVR4400:
+       case CX23885_BOARD_HAUPPAUGE_STARBURST:
        case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
                if (dev->i2c_bus[0].i2c_rc == 0)
                        hauppauge_eeprom(dev, eeprom+0xc0);
@@ -1980,6 +1986,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
                ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
                ts2->src_sel_val   = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
                break;
+       case CX23885_BOARD_HAUPPAUGE_STARBURST:
+               ts1->gen_ctrl_val  = 0xc; /* Serial bus + punctured clock */
+               ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+               ts1->src_sel_val   = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+               break;
        case CX23885_BOARD_DVBSKY_T9580:
        case CX23885_BOARD_DVBSKY_T982:
                ts1->gen_ctrl_val  = 0x5; /* Parallel */
index 1d9d0f86ca8cbe3effb833fe7e43d26185aefb41..1ad49946d7fa9c1ef8f6cb71d8e91c85d566e4a6 100644 (file)
@@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
 
        cx23885_shutdown(dev);
 
-       pci_disable_device(pci_dev);
-
        /* unregister stuff */
        free_irq(pci_dev->irq, dev);
 
+       pci_disable_device(pci_dev);
+
        cx23885_dev_unregister(dev);
        vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
        v4l2_ctrl_handler_free(&dev->ctrl_handler);
index c47d18270cfc8899f262be3f338329e869bb4cb2..a9c450d4b54e4a7d52440b6af017d9068b27435c 100644 (file)
@@ -1710,6 +1710,17 @@ static int dvb_register(struct cx23885_tsport *port)
                        break;
                }
                break;
+       case CX23885_BOARD_HAUPPAUGE_STARBURST:
+               i2c_bus = &dev->i2c_bus[0];
+               fe0->dvb.frontend = dvb_attach(tda10071_attach,
+                                               &hauppauge_tda10071_config,
+                                               &i2c_bus->i2c_adap);
+               if (fe0->dvb.frontend != NULL) {
+                       dvb_attach(a8293_attach, fe0->dvb.frontend,
+                                  &i2c_bus->i2c_adap,
+                                  &hauppauge_a8293_config);
+               }
+               break;
        case CX23885_BOARD_DVBSKY_T9580:
        case CX23885_BOARD_DVBSKY_S950:
                i2c_bus = &dev->i2c_bus[0];
index f55cd12da0fde35b55ae5c3c75d56253f64fd9ac..36f2f96c40e4362713f7e2f06987af911d6472c2 100644 (file)
@@ -99,6 +99,7 @@
 #define CX23885_BOARD_DVBSKY_S950              49
 #define CX23885_BOARD_DVBSKY_S952              50
 #define CX23885_BOARD_DVBSKY_T982              51
+#define CX23885_BOARD_HAUPPAUGE_STARBURST      52
 
 #define GPIO_0 0x00000001
 #define GPIO_1 0x00000002
index b463fe172d164a0f14400c42f13f33aa9dbc82dd..3fe9047ef466faddcb822453e08282bf381c366b 100644 (file)
@@ -602,10 +602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
        strlcpy(cap->card, video->video.name, sizeof(cap->card));
        strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
 
+       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+               | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+
        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
-               cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+               cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
        else
-               cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+               cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
 
        return 0;
 }
index 8efe40337608951db5b72137506dd61e13e20b06..6d885239b16abf0b01b721de4f27cf924f6b7f4a 100644 (file)
@@ -760,8 +760,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici,
 {
        strcpy(cap->driver, "atmel-isi");
        strcpy(cap->card, "Atmel Image Sensor Interface");
-       cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
-                               V4L2_CAP_STREAMING);
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
        return 0;
 }
 
index ce72bd26a6acaef29a740863eaae99232401ec6c..192377f55840b540e82ab26658830457b858f98f 100644 (file)
@@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
 {
        /* cap->name is set by the friendly caller:-> */
        strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index a60c3bb0e4ccdf952307142c2196333ee6842ab5..0b3299dee05d453c87a492cd610d66e507f8a7ba 100644 (file)
@@ -967,7 +967,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
 {
        /* cap->name is set by the firendly caller:-> */
        strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index e6b93281f246c394eee1006deadb1bfac2a12be9..16f65ecb70a3e1fdf13d49cf002d2801559b83d5 100644 (file)
@@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
 {
        /* cap->name is set by the friendly caller:-> */
        strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index 951226af0ebacca484127cc7792802c9eeafa577..8d6e343fec0f28bf1dcc67d0ce96ff48dc6f7250 100644 (file)
@@ -1576,7 +1576,8 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
 {
        /* cap->name is set by the firendly caller:-> */
        strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index 0c1f5564810627ec88242ac373600f03a854387d..9f1473c0a0cfa493227f50ac1920ae34818e670b 100644 (file)
@@ -1799,7 +1799,9 @@ static int rcar_vin_querycap(struct soc_camera_host *ici,
                             struct v4l2_capability *cap)
 {
        strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
        return 0;
 }
 
index 8b27b3eb2b2538d7e75451ffff0c06d27d2cafd6..71787702d4a26cce7ba7a1b02f2ab367ed388db5 100644 (file)
@@ -1652,7 +1652,9 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
                                  struct v4l2_capability *cap)
 {
        strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
        return 0;
 }
 
index 0f345b1f90145dd856c4830cecf82c710eeeaeeb..f327c49d7e0936baf1e529746e82166129423916 100644 (file)
@@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
                {
                        "Mygica T230 DVB-T/T2/C",
                        { NULL },
-                       { &cxusb_table[22], NULL },
+                       { &cxusb_table[20], NULL },
                },
        }
 };
index 1b158f1167ed0722793a8b58b990a1fc378e4a34..536210b39428c98ce1f8385b48ab1a13ba2cf7d2 100644 (file)
@@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
 module_param_array(vbi_nr, int, NULL, 0444);
 MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor");
 
-static struct v4l2_capability pvr_capability ={
-       .driver         = "pvrusb2",
-       .card           = "Hauppauge WinTV pvr-usb2",
-       .bus_info       = "usb",
-       .version        = LINUX_VERSION_CODE,
-       .capabilities   = (V4L2_CAP_VIDEO_CAPTURE |
-                          V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
-                          V4L2_CAP_READWRITE),
-};
-
 static struct v4l2_fmtdesc pvr_fmtdesc [] = {
        {
                .index          = 0,
@@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
        struct pvr2_v4l2_fh *fh = file->private_data;
        struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
 
-       memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+       strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver));
        strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw),
                        sizeof(cap->bus_info));
        strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card));
+       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+                           V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
+                           V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS;
+       switch (fh->pdi->devbase.vfl_type) {
+       case VFL_TYPE_GRABBER:
+               cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
+               break;
+       case VFL_TYPE_RADIO:
+               cap->device_caps = V4L2_CAP_RADIO;
+               break;
+       }
+       cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE;
        return 0;
 }
 
index d09a8916e94005180f0f6beaf0ff53d7b2e932a4..bc08a829bc132068c0b51661f9459293ef30c142 100644 (file)
@@ -3146,27 +3146,26 @@ static int vb2_thread(void *data)
                        prequeue--;
                } else {
                        call_void_qop(q, wait_finish, q);
-                       ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+                       if (!threadio->stop)
+                               ret = vb2_internal_dqbuf(q, &fileio->b, 0);
                        call_void_qop(q, wait_prepare, q);
                        dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
                }
-               if (threadio->stop)
-                       break;
-               if (ret)
+               if (ret || threadio->stop)
                        break;
                try_to_freeze();
 
                vb = q->bufs[fileio->b.index];
                if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
-                       ret = threadio->fnc(vb, threadio->priv);
-               if (ret)
-                       break;
+                       if (threadio->fnc(vb, threadio->priv))
+                               break;
                call_void_qop(q, wait_finish, q);
                if (set_timestamp)
                        v4l2_get_timestamp(&fileio->b.timestamp);
-               ret = vb2_internal_qbuf(q, &fileio->b);
+               if (!threadio->stop)
+                       ret = vb2_internal_qbuf(q, &fileio->b);
                call_void_qop(q, wait_prepare, q);
-               if (ret)
+               if (ret || threadio->stop)
                        break;
        }
 
@@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q)
        threadio->stop = true;
        vb2_internal_streamoff(q, q->type);
        call_void_qop(q, wait_prepare, q);
+       err = kthread_stop(threadio->thread);
        q->fileio = NULL;
        fileio->req.count = 0;
        vb2_reqbufs(q, &fileio->req);
        kfree(fileio);
-       err = kthread_stop(threadio->thread);
        threadio->thread = NULL;
        kfree(threadio);
        q->fileio = NULL;
index 52a0c2f6264ff041f43a2fbcb2e187ada48e6d4b..ae498b53ee4042ef3e39e6f77a7272cffe4abe74 100644 (file)
@@ -554,7 +554,8 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id)
                return ret;
        }
 
-       ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
+       ret = mfd_add_devices(da9052->dev, PLATFORM_DEVID_AUTO,
+                             da9052_subdev_info,
                              ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
        if (ret) {
                dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret);
index dbdd0faeb6ce500678d9dc4f014504560693bded..210d1f85679e50dca4cbb034d4bd4ce91c1bb23f 100644 (file)
@@ -681,21 +681,9 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
 #ifdef CONFIG_PM
 static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
 {
-       struct rtsx_ucr *ucr =
-               (struct rtsx_ucr *)usb_get_intfdata(intf);
-
        dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
                        __func__, message.event);
 
-       /*
-        * Call to make sure LED is off during suspend to save more power.
-        * It is NOT a permanent state and could be turned on anytime later.
-        * Thus no need to call turn_on when resunming.
-        */
-       mutex_lock(&ucr->dev_mutex);
-       rtsx_usb_turn_off_led(ucr);
-       mutex_unlock(&ucr->dev_mutex);
-
        return 0;
 }
 
index e2f9df1c0c361f0d0e66bf782817af6170e49dc5..2d7fae94c861013c594463705e05ea413275e4d0 100644 (file)
@@ -519,6 +519,7 @@ static const u8 stmpe1601_regs[] = {
        [STMPE_IDX_GPDR_LSB]    = STMPE1601_REG_GPIO_SET_DIR_LSB,
        [STMPE_IDX_GPRER_LSB]   = STMPE1601_REG_GPIO_RE_LSB,
        [STMPE_IDX_GPFER_LSB]   = STMPE1601_REG_GPIO_FE_LSB,
+       [STMPE_IDX_GPPUR_LSB]   = STMPE1601_REG_GPIO_PU_LSB,
        [STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
        [STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
@@ -667,6 +668,7 @@ static const u8 stmpe1801_regs[] = {
        [STMPE_IDX_GPDR_LSB]    = STMPE1801_REG_GPIO_SET_DIR_LOW,
        [STMPE_IDX_GPRER_LSB]   = STMPE1801_REG_GPIO_RE_LOW,
        [STMPE_IDX_GPFER_LSB]   = STMPE1801_REG_GPIO_FE_LOW,
+       [STMPE_IDX_GPPUR_LSB]   = STMPE1801_REG_GPIO_PULL_UP_LOW,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
        [STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
 };
@@ -750,6 +752,8 @@ static const u8 stmpe24xx_regs[] = {
        [STMPE_IDX_GPDR_LSB]    = STMPE24XX_REG_GPDR_LSB,
        [STMPE_IDX_GPRER_LSB]   = STMPE24XX_REG_GPRER_LSB,
        [STMPE_IDX_GPFER_LSB]   = STMPE24XX_REG_GPFER_LSB,
+       [STMPE_IDX_GPPUR_LSB]   = STMPE24XX_REG_GPPUR_LSB,
+       [STMPE_IDX_GPPDR_LSB]   = STMPE24XX_REG_GPPDR_LSB,
        [STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
        [STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
index bee0abf82040001664c07e82c646e0e3e5afc259..84adb46b3e2fea599f069072f2f48190602092e9 100644 (file)
@@ -188,6 +188,7 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE1601_REG_GPIO_ED_MSB              0x8A
 #define STMPE1601_REG_GPIO_RE_LSB              0x8D
 #define STMPE1601_REG_GPIO_FE_LSB              0x8F
+#define STMPE1601_REG_GPIO_PU_LSB              0x91
 #define STMPE1601_REG_GPIO_AF_U_MSB            0x92
 
 #define STMPE1601_SYS_CTRL_ENABLE_GPIO         (1 << 3)
@@ -276,6 +277,8 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE24XX_REG_GPEDR_MSB                0x8C
 #define STMPE24XX_REG_GPRER_LSB                0x91
 #define STMPE24XX_REG_GPFER_LSB                0x94
+#define STMPE24XX_REG_GPPUR_LSB                0x97
+#define STMPE24XX_REG_GPPDR_LSB                0x9a
 #define STMPE24XX_REG_GPAFR_U_MSB      0x9B
 
 #define STMPE24XX_SYS_CTRL_ENABLE_GPIO         (1 << 3)
index 0d256cb002eb00480113435390fc19d5d92cb6cf..d6b764349f9d309956b36fd270ae1b6940e8167c 100644 (file)
@@ -125,10 +125,21 @@ int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(tps65218_clear_bits);
 
+static const struct regmap_range tps65218_yes_ranges[] = {
+       regmap_reg_range(TPS65218_REG_INT1, TPS65218_REG_INT2),
+       regmap_reg_range(TPS65218_REG_STATUS, TPS65218_REG_STATUS),
+};
+
+static const struct regmap_access_table tps65218_volatile_table = {
+       .yes_ranges = tps65218_yes_ranges,
+       .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges),
+};
+
 static struct regmap_config tps65218_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .cache_type = REGCACHE_RBTREE,
+       .volatile_table = &tps65218_volatile_table,
 };
 
 static const struct regmap_irq tps65218_irqs[] = {
@@ -193,6 +204,7 @@ static struct regmap_irq_chip tps65218_irq_chip = {
 
        .num_regs = 2,
        .mask_base = TPS65218_REG_INT_MASK1,
+       .status_base = TPS65218_REG_INT1,
 };
 
 static const struct of_device_id of_tps65218_match_table[] = {
index 51fd6b524371ecd9ae762716e4cf33692c36ce45..d1b55fe62817dcd0261ab926a704a37f590ca67c 100644 (file)
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
        return 0;
 }
 
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct cxl_context *ctx = vma->vm_file->private_data;
+       unsigned long address = (unsigned long)vmf->virtual_address;
+       u64 area, offset;
+
+       offset = vmf->pgoff << PAGE_SHIFT;
+
+       pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+                       __func__, ctx->pe, address, offset);
+
+       if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+               area = ctx->afu->psn_phys;
+               if (offset > ctx->afu->adapter->ps_size)
+                       return VM_FAULT_SIGBUS;
+       } else {
+               area = ctx->psn_phys;
+               if (offset > ctx->psn_size)
+                       return VM_FAULT_SIGBUS;
+       }
+
+       mutex_lock(&ctx->status_mutex);
+
+       if (ctx->status != STARTED) {
+               mutex_unlock(&ctx->status_mutex);
+               pr_devel("%s: Context not started, failing problem state access\n", __func__);
+               return VM_FAULT_SIGBUS;
+       }
+
+       vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+       mutex_unlock(&ctx->status_mutex);
+
+       return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+       .fault = cxl_mmap_fault,
+};
+
 /*
  * Map a per-context mmio space into the given vma.
  */
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
        u64 len = vma->vm_end - vma->vm_start;
        len = min(len, ctx->psn_size);
 
-       if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-               return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
-       }
+       if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+               /* make sure there is a valid per process space for this AFU */
+               if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+                       pr_devel("AFU doesn't support mmio space\n");
+                       return -EINVAL;
+               }
 
-       /* make sure there is a valid per process space for this AFU */
-       if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
-               pr_devel("AFU doesn't support mmio space\n");
-               return -EINVAL;
+               /* Can't mmap until the AFU is enabled */
+               if (!ctx->afu->enabled)
+                       return -EBUSY;
        }
 
-       /* Can't mmap until the AFU is enabled */
-       if (!ctx->afu->enabled)
-               return -EBUSY;
-
        pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
                 ctx->psn_phys, ctx->pe , ctx->master);
 
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       return vm_iomap_memory(vma, ctx->psn_phys, len);
+       vma->vm_ops = &cxl_mmap_vmops;
+       return 0;
 }
 
 /*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
        afu_release_irqs(ctx);
        flush_work(&ctx->fault_work); /* Only needed for dedicated process */
        wake_up_all(&ctx->wq);
-
-       /* Release Problem State Area mapping */
-       mutex_lock(&ctx->mapping_lock);
-       if (ctx->mapping)
-               unmap_mapping_range(ctx->mapping, 0, 0, 1);
-       mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
                 * created and torn down after the IDR removed
                 */
                __detach_context(ctx);
+
+               /*
+                * We are force detaching - remove any active PSA mappings so
+                * userspace cannot interfere with the card if it comes back.
+                * Easiest way to exercise this is to unbind and rebind the
+                * driver via sysfs while it is in use.
+                */
+               mutex_lock(&ctx->mapping_lock);
+               if (ctx->mapping)
+                       unmap_mapping_range(ctx->mapping, 0, 0, 1);
+               mutex_unlock(&ctx->mapping_lock);
        }
        mutex_unlock(&afu->contexts_lock);
 }
index e9f2f10dbb3734f3de4df60cdaed583415cfd831..b15d8113877c9f6ed5c45c58fb012fa6f50276aa 100644 (file)
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
        pr_devel("%s: pe: %i\n", __func__, ctx->pe);
 
-       mutex_lock(&ctx->status_mutex);
-       if (ctx->status != OPENED) {
-               rc = -EIO;
-               goto out;
-       }
-
+       /* Do this outside the status_mutex to avoid a circular dependency with
+        * the locking in cxl_mmap_fault() */
        if (copy_from_user(&work, uwork,
                           sizeof(struct cxl_ioctl_start_work))) {
                rc = -EFAULT;
                goto out;
        }
 
+       mutex_lock(&ctx->status_mutex);
+       if (ctx->status != OPENED) {
+               rc = -EIO;
+               goto out;
+       }
+
        /*
         * if any of the reserved fields are set or any of the unused
         * flags are set it's invalid
index ff2755062b4420cf3239a80e0f767d6dc333b6a6..06ff0a2ec96071c5b2d1c2c5dffdff7a5c03d938 100644 (file)
@@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 hcsr = mei_hcsr_read(hw);
 
+       /* H_RST may be found lit before reset is started,
+        * for example if preceding reset flow hasn't completed.
+        * In that case asserting H_RST will be ignored, therefore
+        * we need to clean H_RST bit to start a successful reset sequence.
+        */
+       if ((hcsr & H_RST) == H_RST) {
+               dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+               hcsr &= ~H_RST;
+               mei_me_reg_write(hw, H_CSR, hcsr);
+               hcsr = mei_hcsr_read(hw);
+       }
+
        hcsr |= H_RST | H_IG | H_IS;
 
        if (intr_enable)
index 7aaaf51e1596c5dc399cfa096040822275a05bae..35f19a6838222e411e3ab4e427488915a9a841f8 100644 (file)
@@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                        to_copy = size - bytes_copied;
 
                if (is_iovec) {
-                       struct iovec *iov = (struct iovec *)src;
+                       struct msghdr *msg = (struct msghdr *)src;
                        int err;
 
                        /* The iovec will track bytes_copied internally. */
-                       err = memcpy_fromiovec((u8 *)va + page_offset,
-                                              iov, to_copy);
+                       err = memcpy_from_msg((u8 *)va + page_offset,
+                                             msg, to_copy);
                        if (err != 0) {
                                if (kernel_if->host)
                                        kunmap(kernel_if->u.h.page[page_index]);
@@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest,
  */
 static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
                                  u64 queue_offset,
-                                 const void *src,
+                                 const void *msg,
                                  size_t src_offset, size_t size)
 {
 
@@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
         * We ignore src_offset because src is really a struct iovec * and will
         * maintain offset internally.
         */
-       return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+       return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
 }
 
 /*
@@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek);
  * of bytes enqueued or < 0 on error.
  */
 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
-                         void *iov,
+                         struct msghdr *msg,
                          size_t iov_size,
                          int buf_type)
 {
        ssize_t result;
 
-       if (!qpair || !iov)
+       if (!qpair)
                return VMCI_ERROR_INVALID_ARGS;
 
        qp_lock(qpair);
@@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
                result = qp_enqueue_locked(qpair->produce_q,
                                           qpair->consume_q,
                                           qpair->produce_q_size,
-                                          iov, iov_size,
+                                          msg, iov_size,
                                           qp_memcpy_to_queue_iov);
 
                if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
index e3e56d35f0eeee634a0930acce0500bb33cb3258..970314e0aac8f1f05e5b9ff199e28723b9d85a19 100644 (file)
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
        { "INT33BB"  , "3" , &sdhci_acpi_slot_int_sd },
        { "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "INT3436"  , NULL, &sdhci_acpi_slot_int_sdio },
+       { "INT344D"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "PNP0D40"  },
        { },
 };
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
        { "INT33BB"  },
        { "INT33C6"  },
        { "INT3436"  },
+       { "INT344D"  },
        { "PNP0D40"  },
        { },
 };
index 03427755b9029b297393d1d43851b4f678f2bbae..4f38554ce6797e0a461a3ddaf76261264229f107 100644 (file)
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = {
                .subdevice      = PCI_ANY_ID,
                .driver_data    = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
        },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_O2,
                .device         = PCI_DEVICE_ID_O2_8120,
index d57c3d169914e94e716b64b90e8b8da2b86afa87..1ec684d06d54733b35b2427d4007bfd5cfd3d52b 100644 (file)
@@ -21,6 +21,9 @@
 #define PCI_DEVICE_ID_INTEL_CLV_EMMC0  0x08e5
 #define PCI_DEVICE_ID_INTEL_CLV_EMMC1  0x08e6
 #define PCI_DEVICE_ID_INTEL_QRK_SD     0x08A7
+#define PCI_DEVICE_ID_INTEL_SPT_EMMC   0x9d2b
+#define PCI_DEVICE_ID_INTEL_SPT_SDIO   0x9d2c
+#define PCI_DEVICE_ID_INTEL_SPT_SD     0x9d2d
 
 /*
  * PCI registers
index 45238871192da1d6553268d82659dce3cc600d68..ca3424e7ef717c59a82da1e13a8ca3cf939da7f2 100644 (file)
@@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (IS_ERR(host))
                return PTR_ERR(host);
 
-       if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
-               ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
-               if (ret < 0)
-                       goto err_mbus_win;
-       }
-
-
        pltfm_host = sdhci_priv(host);
        pltfm_host->priv = pxa;
 
@@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (!IS_ERR(pxa->clk_core))
                clk_prepare_enable(pxa->clk_core);
 
+       if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+               ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+               if (ret < 0)
+                       goto err_mbus_win;
+       }
+
        /* enable 1/8V DDR capable */
        host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
@@ -396,11 +395,11 @@ err_add_host:
        pm_runtime_disable(&pdev->dev);
 err_of_parse:
 err_cd_req:
+err_mbus_win:
        clk_disable_unprepare(pxa->clk_io);
        if (!IS_ERR(pxa->clk_core))
                clk_disable_unprepare(pxa->clk_core);
 err_clk_get:
-err_mbus_win:
        sdhci_pltfm_free(pdev);
        return ret;
 }
index cbb245b5853873cbddc2f1a0967c8bcc989ea4e1..f1a488ee432f891971f79707d78ca91a631b6c51 100644 (file)
@@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host)
 
                del_timer_sync(&host->tuning_timer);
                host->flags &= ~SDHCI_NEEDS_RETUNING;
-               host->mmc->max_blk_count =
-                       (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
        }
        sdhci_enable_card_detection(host);
 }
@@ -1273,6 +1271,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                spin_unlock_irq(&host->lock);
                mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
                spin_lock_irq(&host->lock);
+
+               if (mode != MMC_POWER_OFF)
+                       sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+               else
+                       sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
                return;
        }
 
@@ -1353,6 +1357,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
        sdhci_runtime_pm_get(host);
 
+       present = mmc_gpio_get_cd(host->mmc);
+
        spin_lock_irqsave(&host->lock, flags);
 
        WARN_ON(host->mrq != NULL);
@@ -1381,7 +1387,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
         *     zero: cd-gpio is used, and card is removed
         *     one: cd-gpio is used, and card is present
         */
-       present = mmc_gpio_get_cd(host->mmc);
        if (present < 0) {
                /* If polling, assume that the card is always present. */
                if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1880,6 +1885,18 @@ static int sdhci_card_busy(struct mmc_host *mmc)
        return !(present_state & SDHCI_DATA_LVL_MASK);
 }
 
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&host->lock, flags);
+       host->flags |= SDHCI_HS400_TUNING;
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       return 0;
+}
+
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
        struct sdhci_host *host = mmc_priv(mmc);
@@ -1887,10 +1904,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
        int tuning_loop_counter = MAX_TUNING_LOOP;
        int err = 0;
        unsigned long flags;
+       unsigned int tuning_count = 0;
+       bool hs400_tuning;
 
        sdhci_runtime_pm_get(host);
        spin_lock_irqsave(&host->lock, flags);
 
+       hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+       host->flags &= ~SDHCI_HS400_TUNING;
+
+       if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+               tuning_count = host->tuning_count;
+
        /*
         * The Host Controller needs tuning only in case of SDR104 mode
         * and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1899,8 +1924,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * tuning function has to be executed.
         */
        switch (host->timing) {
+       /* HS400 tuning is done in HS200 mode */
        case MMC_TIMING_MMC_HS400:
+               err = -EINVAL;
+               goto out_unlock;
+
        case MMC_TIMING_MMC_HS200:
+               /*
+                * Periodic re-tuning for HS400 is not expected to be needed, so
+                * disable it here.
+                */
+               if (hs400_tuning)
+                       tuning_count = 0;
+               break;
+
        case MMC_TIMING_UHS_SDR104:
                break;
 
@@ -1911,9 +1948,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
                /* FALLTHROUGH */
 
        default:
-               spin_unlock_irqrestore(&host->lock, flags);
-               sdhci_runtime_pm_put(host);
-               return 0;
+               goto out_unlock;
        }
 
        if (host->ops->platform_execute_tuning) {
@@ -2037,24 +2072,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
        }
 
 out:
-       /*
-        * If this is the very first time we are here, we start the retuning
-        * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
-        * flag won't be set, we check this condition before actually starting
-        * the timer.
-        */
-       if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
-           (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+       host->flags &= ~SDHCI_NEEDS_RETUNING;
+
+       if (tuning_count) {
                host->flags |= SDHCI_USING_RETUNING_TIMER;
-               mod_timer(&host->tuning_timer, jiffies +
-                       host->tuning_count * HZ);
-               /* Tuning mode 1 limits the maximum data length to 4MB */
-               mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
-       } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
-               host->flags &= ~SDHCI_NEEDS_RETUNING;
-               /* Reload the new initial value for timer */
-               mod_timer(&host->tuning_timer, jiffies +
-                         host->tuning_count * HZ);
+               mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
        }
 
        /*
@@ -2070,6 +2092,7 @@ out:
 
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+out_unlock:
        spin_unlock_irqrestore(&host->lock, flags);
        sdhci_runtime_pm_put(host);
 
@@ -2110,15 +2133,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
 {
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
+       int present;
 
        /* First check if client has provided their own card event */
        if (host->ops->card_event)
                host->ops->card_event(host);
 
+       present = sdhci_do_get_cd(host);
+
        spin_lock_irqsave(&host->lock, flags);
 
        /* Check host->mrq first in case we are runtime suspended */
-       if (host->mrq && !sdhci_do_get_cd(host)) {
+       if (host->mrq && !present) {
                pr_err("%s: Card removed during transfer!\n",
                        mmc_hostname(host->mmc));
                pr_err("%s: Resetting controller.\n",
@@ -2142,6 +2168,7 @@ static const struct mmc_host_ops sdhci_ops = {
        .hw_reset       = sdhci_hw_reset,
        .enable_sdio_irq = sdhci_enable_sdio_irq,
        .start_signal_voltage_switch    = sdhci_start_signal_voltage_switch,
+       .prepare_hs400_tuning           = sdhci_prepare_hs400_tuning,
        .execute_tuning                 = sdhci_execute_tuning,
        .card_event                     = sdhci_card_event,
        .card_busy      = sdhci_card_busy,
@@ -3260,8 +3287,9 @@ int sdhci_add_host(struct sdhci_host *host)
                mmc->max_segs = SDHCI_MAX_SEGS;
 
        /*
-        * Maximum number of sectors in one transfer. Limited by DMA boundary
-        * size (512KiB).
+        * Maximum number of sectors in one transfer. Limited by SDMA boundary
+        * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
+        * is less anyway.
         */
        mmc->max_req_size = 524288;
 
index 8baa87df173841ea90e07895dbb0620d4abb0a8d..cfc4a9c1000abb109e95c3d233405ef57febe5c1 100644 (file)
@@ -467,11 +467,14 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
                /* set the partner sync. to on if the partner is sync,
                 * and the port is matched
                 */
-               if ((port->sm_vars & AD_PORT_MATCHED)
-                   && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION))
+               if ((port->sm_vars & AD_PORT_MATCHED) &&
+                   (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
                        partner->port_state |= AD_STATE_SYNCHRONIZATION;
-               else
+                       pr_debug("%s partner sync=1\n", port->slave->dev->name);
+               } else {
                        partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
+                       pr_debug("%s partner sync=0\n", port->slave->dev->name);
+               }
        }
 }
 
@@ -726,6 +729,8 @@ static inline void __update_lacpdu_from_port(struct port *port)
        lacpdu->actor_port_priority = htons(port->actor_port_priority);
        lacpdu->actor_port = htons(port->actor_port_number);
        lacpdu->actor_state = port->actor_oper_port_state;
+       pr_debug("update lacpdu: %s, actor port state %x\n",
+                port->slave->dev->name, port->actor_oper_port_state);
 
        /* lacpdu->reserved_3_1              initialized
         * lacpdu->tlv_type_partner_info     initialized
@@ -898,7 +903,9 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
                        if ((port->sm_vars & AD_PORT_SELECTED) &&
                            (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
                            !__check_agg_selection_timer(port)) {
-                               port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;
+                               if (port->aggregator->is_active)
+                                       port->sm_mux_state =
+                                           AD_MUX_COLLECTING_DISTRIBUTING;
                        } else if (!(port->sm_vars & AD_PORT_SELECTED) ||
                                   (port->sm_vars & AD_PORT_STANDBY)) {
                                /* if UNSELECTED or STANDBY */
@@ -910,12 +917,16 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
                                 */
                                __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
                                port->sm_mux_state = AD_MUX_DETACHED;
+                       } else if (port->aggregator->is_active) {
+                               port->actor_oper_port_state |=
+                                   AD_STATE_SYNCHRONIZATION;
                        }
                        break;
                case AD_MUX_COLLECTING_DISTRIBUTING:
                        if (!(port->sm_vars & AD_PORT_SELECTED) ||
                            (port->sm_vars & AD_PORT_STANDBY) ||
-                           !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)) {
+                           !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) ||
+                           !(port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) {
                                port->sm_mux_state = AD_MUX_ATTACHED;
                        } else {
                                /* if port state hasn't changed make
@@ -937,8 +948,10 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
 
        /* check if the state machine was changed */
        if (port->sm_mux_state != last_state) {
-               pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
-                        port->actor_port_number, last_state,
+               pr_debug("Mux Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
+                        port->actor_port_number,
+                        port->slave->dev->name,
+                        last_state,
                         port->sm_mux_state);
                switch (port->sm_mux_state) {
                case AD_MUX_DETACHED:
@@ -953,7 +966,12 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
                        port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
                        break;
                case AD_MUX_ATTACHED:
-                       port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
+                       if (port->aggregator->is_active)
+                               port->actor_oper_port_state |=
+                                   AD_STATE_SYNCHRONIZATION;
+                       else
+                               port->actor_oper_port_state &=
+                                   ~AD_STATE_SYNCHRONIZATION;
                        port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
                        port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
                        ad_disable_collecting_distributing(port,
@@ -963,6 +981,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
                case AD_MUX_COLLECTING_DISTRIBUTING:
                        port->actor_oper_port_state |= AD_STATE_COLLECTING;
                        port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
+                       port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
                        ad_enable_collecting_distributing(port,
                                                          update_slave_arr);
                        port->ntt = true;
@@ -1044,8 +1063,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 
        /* check if the State machine was changed or new lacpdu arrived */
        if ((port->sm_rx_state != last_state) || (lacpdu)) {
-               pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
-                        port->actor_port_number, last_state,
+               pr_debug("Rx Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
+                        port->actor_port_number,
+                        port->slave->dev->name,
+                        last_state,
                         port->sm_rx_state);
                switch (port->sm_rx_state) {
                case AD_RX_INITIALIZE:
@@ -1394,6 +1415,9 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
 
        aggregator = __get_first_agg(port);
        ad_agg_selection_logic(aggregator, update_slave_arr);
+
+       if (!port->aggregator->is_active)
+               port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
 }
 
 /* Decide if "agg" is a better choice for the new active aggregator that
@@ -2195,8 +2219,10 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
                switch (lacpdu->subtype) {
                case AD_TYPE_LACPDU:
                        ret = RX_HANDLER_CONSUMED;
-                       netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n",
-                                  port->actor_port_number);
+                       netdev_dbg(slave->bond->dev,
+                                  "Received LACPDU on port %d slave %s\n",
+                                  port->actor_port_number,
+                                  slave->dev->name);
                        /* Protect against concurrent state machines */
                        spin_lock(&slave->bond->mode_lock);
                        ad_rx_machine(lacpdu, port);
@@ -2288,7 +2314,10 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
        port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
        port->actor_oper_port_key = port->actor_admin_port_key |=
                __get_duplex(port);
-       netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number);
+       netdev_dbg(slave->bond->dev, "Port %d slave %s changed duplex\n",
+                  port->actor_port_number, slave->dev->name);
+       if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
+               port->sm_vars |= AD_PORT_LACP_ENABLED;
        /* there is no need to reselect a new aggregator, just signal the
         * state machines to reinitialize
         */
index 0dceba1a2ba15f4706922a5423f680e7cd17ef77..679ef00d6b16a102dd9681121b6ae91cc3714a67 100644 (file)
@@ -77,6 +77,7 @@
 #include <net/pkt_sched.h>
 #include <linux/rculist.h>
 #include <net/flow_keys.h>
+#include <net/switchdev.h>
 #include <net/bonding.h>
 #include <net/bond_3ad.h>
 #include <net/bond_alb.h>
@@ -334,7 +335,7 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
  *
  * Returns zero if carrier state does not change, nonzero if it does.
  */
-static int bond_set_carrier(struct bonding *bond)
+int bond_set_carrier(struct bonding *bond)
 {
        struct list_head *iter;
        struct slave *slave;
@@ -789,7 +790,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                        }
 
                        new_active->delay = 0;
-                       new_active->link = BOND_LINK_UP;
+                       bond_set_slave_link_state(new_active, BOND_LINK_UP);
 
                        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
@@ -979,7 +980,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
        netdev_features_t mask;
        struct slave *slave;
 
-       mask = features;
+       /* If any slave has the offload feature flag set,
+        * set the offload flag on the bond.
+        */
+       mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
 
@@ -998,7 +1003,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
                                 NETIF_F_HIGHDMA | NETIF_F_LRO)
 
 #define BOND_ENC_FEATURES      (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
-                                NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)
+                                NETIF_F_TSO)
 
 static void bond_compute_features(struct bonding *bond)
 {
@@ -1034,7 +1039,7 @@ static void bond_compute_features(struct bonding *bond)
 
 done:
        bond_dev->vlan_features = vlan_features;
-       bond_dev->hw_enc_features = enc_features;
+       bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
        bond_dev->hard_header_len = max_hard_header_len;
        bond_dev->gso_max_segs = gso_max_segs;
        netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -1176,6 +1181,62 @@ static void bond_free_slave(struct slave *slave)
        kfree(slave);
 }
 
+static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
+{
+       info->bond_mode = BOND_MODE(bond);
+       info->miimon = bond->params.miimon;
+       info->num_slaves = bond->slave_cnt;
+}
+
+static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
+{
+       strcpy(info->slave_name, slave->dev->name);
+       info->link = slave->link;
+       info->state = bond_slave_state(slave);
+       info->link_failure_count = slave->link_failure_count;
+}
+
+static void bond_netdev_notify(struct slave *slave, struct net_device *dev)
+{
+       struct bonding *bond = slave->bond;
+       struct netdev_bonding_info bonding_info;
+
+       rtnl_lock();
+       /* make sure that slave is still valid */
+       if (dev->priv_flags & IFF_BONDING) {
+               bond_fill_ifslave(slave, &bonding_info.slave);
+               bond_fill_ifbond(bond, &bonding_info.master);
+               netdev_bonding_info_change(slave->dev, &bonding_info);
+       }
+       rtnl_unlock();
+}
+
+static void bond_netdev_notify_work(struct work_struct *_work)
+{
+       struct netdev_notify_work *w =
+               container_of(_work, struct netdev_notify_work, work.work);
+
+       bond_netdev_notify(w->slave, w->dev);
+       dev_put(w->dev);
+}
+
+void bond_queue_slave_event(struct slave *slave)
+{
+       struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
+
+       if (!nnw)
+               return;
+
+       INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
+       nnw->slave = slave;
+       nnw->dev = slave->dev;
+
+       if (queue_delayed_work(slave->bond->wq, &nnw->work, 0))
+               dev_hold(slave->dev);
+       else
+               kfree(nnw);
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1439,19 +1500,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        if (bond->params.miimon) {
                if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
                        if (bond->params.updelay) {
-                               new_slave->link = BOND_LINK_BACK;
+                               bond_set_slave_link_state(new_slave,
+                                                         BOND_LINK_BACK);
                                new_slave->delay = bond->params.updelay;
                        } else {
-                               new_slave->link = BOND_LINK_UP;
+                               bond_set_slave_link_state(new_slave,
+                                                         BOND_LINK_UP);
                        }
                } else {
-                       new_slave->link = BOND_LINK_DOWN;
+                       bond_set_slave_link_state(new_slave, BOND_LINK_DOWN);
                }
        } else if (bond->params.arp_interval) {
-               new_slave->link = (netif_carrier_ok(slave_dev) ?
-                       BOND_LINK_UP : BOND_LINK_DOWN);
+               bond_set_slave_link_state(new_slave,
+                                         (netif_carrier_ok(slave_dev) ?
+                                         BOND_LINK_UP : BOND_LINK_DOWN));
        } else {
-               new_slave->link = BOND_LINK_UP;
+               bond_set_slave_link_state(new_slave, BOND_LINK_UP);
        }
 
        if (new_slave->link != BOND_LINK_DOWN)
@@ -1567,6 +1631,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                    new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
 
        /* enslave is successful */
+       bond_queue_slave_event(new_slave);
        return 0;
 
 /* Undo stages on error */
@@ -1816,11 +1881,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-
-       info->bond_mode = BOND_MODE(bond);
-       info->miimon = bond->params.miimon;
-       info->num_slaves = bond->slave_cnt;
-
+       bond_fill_ifbond(bond, info);
        return 0;
 }
 
@@ -1834,10 +1895,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
        bond_for_each_slave(bond, slave, iter) {
                if (i++ == (int)info->slave_id) {
                        res = 0;
-                       strcpy(info->slave_name, slave->dev->name);
-                       info->link = slave->link;
-                       info->state = bond_slave_state(slave);
-                       info->link_failure_count = slave->link_failure_count;
+                       bond_fill_ifslave(slave, info);
                        break;
                }
        }
@@ -1867,7 +1925,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        if (link_state)
                                continue;
 
-                       slave->link = BOND_LINK_FAIL;
+                       bond_set_slave_link_state(slave, BOND_LINK_FAIL);
                        slave->delay = bond->params.downdelay;
                        if (slave->delay) {
                                netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -1882,7 +1940,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                case BOND_LINK_FAIL:
                        if (link_state) {
                                /* recovered before downdelay expired */
-                               slave->link = BOND_LINK_UP;
+                               bond_set_slave_link_state(slave, BOND_LINK_UP);
                                slave->last_link_up = jiffies;
                                netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
                                            (bond->params.downdelay - slave->delay) *
@@ -1904,7 +1962,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        if (!link_state)
                                continue;
 
-                       slave->link = BOND_LINK_BACK;
+                       bond_set_slave_link_state(slave, BOND_LINK_BACK);
                        slave->delay = bond->params.updelay;
 
                        if (slave->delay) {
@@ -1917,7 +1975,8 @@ static int bond_miimon_inspect(struct bonding *bond)
                        /*FALLTHRU*/
                case BOND_LINK_BACK:
                        if (!link_state) {
-                               slave->link = BOND_LINK_DOWN;
+                               bond_set_slave_link_state(slave,
+                                                         BOND_LINK_DOWN);
                                netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
                                            (bond->params.updelay - slave->delay) *
                                            bond->params.miimon,
@@ -1955,7 +2014,7 @@ static void bond_miimon_commit(struct bonding *bond)
                        continue;
 
                case BOND_LINK_UP:
-                       slave->link = BOND_LINK_UP;
+                       bond_set_slave_link_state(slave, BOND_LINK_UP);
                        slave->last_link_up = jiffies;
 
                        primary = rtnl_dereference(bond->primary_slave);
@@ -1995,7 +2054,7 @@ static void bond_miimon_commit(struct bonding *bond)
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
 
-                       slave->link = BOND_LINK_DOWN;
+                       bond_set_slave_link_state(slave, BOND_LINK_DOWN);
 
                        if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
                            BOND_MODE(bond) == BOND_MODE_8023AD)
@@ -2578,7 +2637,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
                                struct slave *current_arp_slave;
 
                                current_arp_slave = rtnl_dereference(bond->current_arp_slave);
-                               slave->link = BOND_LINK_UP;
+                               bond_set_slave_link_state(slave, BOND_LINK_UP);
                                if (current_arp_slave) {
                                        bond_set_slave_inactive_flags(
                                                current_arp_slave,
@@ -2601,7 +2660,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
 
-                       slave->link = BOND_LINK_DOWN;
+                       bond_set_slave_link_state(slave, BOND_LINK_DOWN);
                        bond_set_slave_inactive_flags(slave,
                                                      BOND_SLAVE_NOTIFY_NOW);
 
@@ -2680,7 +2739,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
                 * up when it is actually down
                 */
                if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
-                       slave->link = BOND_LINK_DOWN;
+                       bond_set_slave_link_state(slave, BOND_LINK_DOWN);
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
 
@@ -2700,7 +2759,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
        if (!new_slave)
                goto check_state;
 
-       new_slave->link = BOND_LINK_BACK;
+       bond_set_slave_link_state(new_slave, BOND_LINK_BACK);
        bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
        bond_arp_send_all(bond, new_slave);
        new_slave->last_link_up = jiffies;
@@ -3066,7 +3125,7 @@ static int bond_open(struct net_device *bond_dev)
                            slave != rcu_access_pointer(bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
-                       } else {
+                       } else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
                                bond_set_slave_active_flags(slave,
                                                            BOND_SLAVE_NOTIFY_NOW);
                        }
@@ -3734,7 +3793,7 @@ out:
  * usable slave array is formed in the control path. The xmit function
  * just calculates hash and sends the packet out.
  */
-int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
+static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct bonding *bond = netdev_priv(dev);
        struct slave *slave;
@@ -3952,6 +4011,8 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_add_slave          = bond_enslave,
        .ndo_del_slave          = bond_release,
        .ndo_fix_features       = bond_fix_features,
+       .ndo_bridge_setlink     = ndo_dflt_netdev_switch_port_bridge_setlink,
+       .ndo_bridge_dellink     = ndo_dflt_netdev_switch_port_bridge_dellink,
 };
 
 static const struct device_type bond_type = {
@@ -4010,7 +4071,7 @@ void bond_setup(struct net_device *bond_dev)
                                NETIF_F_HW_VLAN_CTAG_FILTER;
 
        bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
-       bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+       bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
        bond_dev->features |= bond_dev->hw_features;
 }
 
index 1a61cc9b3402b1e96e973cf88f5e51bb4d56bad2..4df28943d2229035166d2bb4a72ec11c8f9671c5 100644 (file)
@@ -186,7 +186,7 @@ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
        { NULL,  -1, 0}
 };
 
-static const struct bond_option bond_opts[] = {
+static const struct bond_option bond_opts[BOND_OPT_LAST] = {
        [BOND_OPT_MODE] = {
                .id = BOND_OPT_MODE,
                .name = "mode",
@@ -379,8 +379,7 @@ static const struct bond_option bond_opts[] = {
                .values = bond_tlb_dynamic_lb_tbl,
                .flags = BOND_OPTFLAG_IFDOWN,
                .set = bond_option_tlb_dynamic_lb_set,
-       },
-       { }
+       }
 };
 
 /* Searches for an option by name */
@@ -1182,6 +1181,7 @@ static int bond_option_min_links_set(struct bonding *bond,
        netdev_info(bond->dev, "Setting min links value to %llu\n",
                    newval->value);
        bond->params.min_links = newval->value;
+       bond_set_carrier(bond);
 
        return 0;
 }
index 5e40a8b68cbe1964a242868ee978551442a3bfd0..b3b922adc0e4f68ed15ff34537c21e1bd7e5e81f 100644 (file)
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
 
        cfhsi = netdev_priv(dev);
        cfhsi_netlink_parms(data, cfhsi);
-       dev_net_set(cfhsi->ndev, src_net);
 
        get_ops = symbol_get(cfhsi_get_ops);
        if (!get_ops) {
index d0c2463b573f43ce22235b119dc17303d226fa8c..eeb4b8b6b335c293863f48fe5ac9a92f89f0127c 100644 (file)
@@ -138,7 +138,6 @@ struct at91_devtype_data {
 
 struct at91_priv {
        struct can_priv can;            /* must be the first member! */
-       struct net_device *dev;
        struct napi_struct napi;
 
        void __iomem *reg_base;
@@ -1350,7 +1349,6 @@ static int at91_can_probe(struct platform_device *pdev)
        priv->can.do_get_berr_counter = at91_get_berr_counter;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
                CAN_CTRLMODE_LISTENONLY;
-       priv->dev = dev;
        priv->reg_base = addr;
        priv->devtype_data = *devtype_data;
        priv->clk = clk;
index 417d50998e314f8c440c401b20383af03ece00e2..e7a6363e736b6f2b35256dfd8fb957cf62abdbe0 100644 (file)
@@ -352,6 +352,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
                netdev_dbg(dev, "bus-off mode interrupt\n");
                state = CAN_STATE_BUS_OFF;
                cf->can_id |= CAN_ERR_BUSOFF;
+               priv->can.can_stats.bus_off++;
                can_bus_off(dev);
        }
 
index f94a9fa60488ed8e23523f5f8c3665133f039dbb..041525d2595ced06ee4363bb09f5fe62dfe5b7ec 100644 (file)
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
 
        c_can_irq_control(priv, false);
 
+       /* put ctrl to init on stop to end ongoing transmission */
+       priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
        /* deactivate pins */
        pinctrl_pm_select_sleep_state(dev->dev.parent);
        priv->can.state = CAN_STATE_STOPPED;
@@ -866,7 +869,7 @@ static int c_can_handle_state_change(struct net_device *dev,
        case C_CAN_BUS_OFF:
                /* bus-off state */
                priv->can.state = CAN_STATE_BUS_OFF;
-               can_bus_off(dev);
+               priv->can.can_stats.bus_off++;
                break;
        default:
                break;
index f363972cd77d9a5a44267811a5db48c8d40ed397..e36d10520e248cd2b4d67bee15b1876cccf994d3 100644 (file)
@@ -103,27 +103,34 @@ static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable)
        mask = 1 << raminit->bits.start | 1 << raminit->bits.done;
        regmap_read(raminit->syscon, raminit->reg, &ctrl);
 
-       /* We clear the done and start bit first. The start bit is
+       /* We clear the start bit first. The start bit is
         * looking at the 0 -> transition, but is not self clearing;
-        * And we clear the init done bit as well.
         * NOTE: DONE must be written with 1 to clear it.
+        * We can't clear the DONE bit here using regmap_update_bits()
+        * as it will bypass the write if initial condition is START:0 DONE:1
+        * e.g. on DRA7 which needs START pulse.
         */
-       ctrl &= ~(1 << raminit->bits.start);
-       ctrl |= 1 << raminit->bits.done;
-       regmap_write(raminit->syscon, raminit->reg, ctrl);
+       ctrl &= ~mask;  /* START = 0, DONE = 0 */
+       regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
 
-       ctrl &= ~(1 << raminit->bits.done);
-       c_can_hw_raminit_wait_syscon(priv, mask, ctrl);
+       /* check if START bit is 0. Ignore DONE bit for now
+        * as it can be either 0 or 1.
+        */
+       c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl);
 
        if (enable) {
-               /* Set start bit and wait for the done bit. */
+               /* Clear DONE bit & set START bit. */
                ctrl |= 1 << raminit->bits.start;
-               regmap_write(raminit->syscon, raminit->reg, ctrl);
-
+               /* DONE must be written with 1 to clear it */
+               ctrl |= 1 << raminit->bits.done;
+               regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
+               /* prevent further clearing of DONE bit */
+               ctrl &= ~(1 << raminit->bits.done);
                /* clear START bit if start pulse is needed */
                if (raminit->needs_pulse) {
                        ctrl &= ~(1 << raminit->bits.start);
-                       regmap_write(raminit->syscon, raminit->reg, ctrl);
+                       regmap_update_bits(raminit->syscon, raminit->reg,
+                                          mask, ctrl);
                }
 
                ctrl |= 1 << raminit->bits.done;
index c486fe510f370957944f9082e1712cfa3cb7aa8f..c11d4498403617e4dddcea42d085e08355c9a5d2 100644 (file)
@@ -535,6 +535,7 @@ static int cc770_err(struct net_device *dev, u8 status)
                cc770_write_reg(priv, control, CTRL_INI);
                cf->can_id |= CAN_ERR_BUSOFF;
                priv->can.state = CAN_STATE_BUS_OFF;
+               priv->can.can_stats.bus_off++;
                can_bus_off(dev);
        } else if (status & STAT_WARN) {
                cf->can_id |= CAN_ERR_CRTL;
index 3ec8f6f25e5f979e16838930295fe4cd66b2841c..3c82e02e3daee633b65274abce48a4ae71257d5a 100644 (file)
@@ -289,9 +289,11 @@ static void can_update_state_error_stats(struct net_device *dev,
                priv->can_stats.error_passive++;
                break;
        case CAN_STATE_BUS_OFF:
+               priv->can_stats.bus_off++;
+               break;
        default:
                break;
-       };
+       }
 }
 
 static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
@@ -544,7 +546,6 @@ void can_bus_off(struct net_device *dev)
        netdev_dbg(dev, "bus-off\n");
 
        netif_carrier_off(dev);
-       priv->can_stats.bus_off++;
 
        if (priv->restart_ms)
                mod_timer(&priv->restart_timer,
@@ -807,10 +808,14 @@ static int can_changelink(struct net_device *dev,
                if (dev->flags & IFF_UP)
                        return -EBUSY;
                cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-               if (cm->flags & ~priv->ctrlmode_supported)
+
+               /* check whether changed bits are allowed to be modified */
+               if (cm->mask & ~priv->ctrlmode_supported)
                        return -EOPNOTSUPP;
+
+               /* clear bits to be modified and copy the flag values */
                priv->ctrlmode &= ~cm->mask;
-               priv->ctrlmode |= cm->flags;
+               priv->ctrlmode |= (cm->flags & cm->mask);
 
                /* CAN_CTRLMODE_FD can only be set when driver supports FD */
                if (priv->ctrlmode & CAN_CTRLMODE_FD)
index b1d583ba967482f4039af3c372cf9df46d229248..80c46ad4cee439d2015b3ee7de66d578f54afd0b 100644 (file)
@@ -247,7 +247,6 @@ struct flexcan_devtype_data {
 
 struct flexcan_priv {
        struct can_priv can;
-       struct net_device *dev;
        struct napi_struct napi;
 
        void __iomem *base;
@@ -1220,7 +1219,6 @@ static int flexcan_probe(struct platform_device *pdev)
                CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES |
                CAN_CTRLMODE_BERR_REPORTING;
        priv->base = base;
-       priv->dev = dev;
        priv->clk_ipg = clk_ipg;
        priv->clk_per = clk_per;
        priv->pdata = dev_get_platdata(&pdev->dev);
index 1b118394907f0ef46c20e24b778bc9ce56e40b7b..4dd183a3643ae3cbe358455f2b099f54dda7870e 100644 (file)
@@ -1008,6 +1008,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
                if (status & SR_BS) {
                        state = CAN_STATE_BUS_OFF;
                        cf->can_id |= CAN_ERR_BUSOFF;
+                       mod->can.can_stats.bus_off++;
                        can_bus_off(dev);
                } else if (status & SR_ES) {
                        if (rxerr >= 128 || txerr >= 128)
@@ -1678,8 +1679,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
        if (ret)
                return ret;
 
-       ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
-       if (ret == 0) {
+       if (!wait_for_completion_timeout(&mod->buserror_comp, HZ)) {
                netdev_info(mod->ndev, "%s timed out\n", __func__);
                return -ETIMEDOUT;
        }
@@ -1704,8 +1704,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
        if (ret)
                return ret;
 
-       ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
-       if (ret == 0) {
+       if (!wait_for_completion_timeout(&mod->termination_comp, HZ)) {
                netdev_info(mod->ndev, "%s timed out\n", __func__);
                return -ETIMEDOUT;
        }
index d7bc462aafdc27a26774aa6cb6cadb0e0d8547a3..2e04b3aeeb374101b54cfe4dc0506df3a1f3420d 100644 (file)
@@ -589,6 +589,7 @@ static int m_can_handle_state_change(struct net_device *dev,
                /* bus-off state */
                priv->can.state = CAN_STATE_BUS_OFF;
                m_can_disable_all_interrupts(priv);
+               priv->can.can_stats.bus_off++;
                can_bus_off(dev);
                break;
        default:
@@ -955,6 +956,11 @@ static struct net_device *alloc_m_can_dev(void)
        priv->can.data_bittiming_const = &m_can_data_bittiming_const;
        priv->can.do_set_mode = m_can_set_mode;
        priv->can.do_get_berr_counter = m_can_get_berr_counter;
+
+       /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
+       priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+
+       /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
        priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
                                        CAN_CTRLMODE_LISTENONLY |
                                        CAN_CTRLMODE_BERR_REPORTING |
index a67eb01f3028fee64837fae7878a6b78c2728d6c..e187ca783da0946def7585ff8d85ac76ea201e05 100644 (file)
@@ -505,6 +505,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
                pch_can_set_rx_all(priv, 0);
                state = CAN_STATE_BUS_OFF;
                cf->can_id |= CAN_ERR_BUSOFF;
+               priv->can.can_stats.bus_off++;
                can_bus_off(ndev);
        }
 
index 91cd48ca0efcf59c768155aa7ac517998eabbee4..7deb80dcbe8c09b669f15e3f5dfb3af3fc72598e 100644 (file)
@@ -331,6 +331,7 @@ static void rcar_can_error(struct net_device *ndev)
                priv->can.state = CAN_STATE_BUS_OFF;
                /* Clear interrupt condition */
                writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+               priv->can.can_stats.bus_off++;
                can_bus_off(ndev);
                if (skb)
                        cf->can_id |= CAN_ERR_BUSOFF;
index 2bf98d862302eba41eaa6c4495c787d24451814c..7621f91a8a209e9de74d55de14fbb31947c449ee 100644 (file)
@@ -261,6 +261,7 @@ static int softing_handle_1(struct softing *card)
                                ++priv->can.can_stats.error_passive;
                        else if (can_state == CAN_STATE_BUS_OFF) {
                                /* this calls can_close_cleanup() */
+                               ++priv->can.can_stats.bus_off;
                                can_bus_off(netdev);
                                netif_stop_queue(netdev);
                        }
index c66d699640a9c1478026da5df023264a7b1ff973..bf63fee4e743a4c63c59de2445ef1ad651baa7c2 100644 (file)
@@ -905,6 +905,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
                if (priv->can.state == CAN_STATE_BUS_OFF) {
                        if (priv->can.restart_ms == 0) {
                                priv->force_quit = 1;
+                               priv->can.can_stats.bus_off++;
                                can_bus_off(net);
                                mcp251x_hw_sleep(spi);
                                break;
index 9a07eafe554b146135505aa50ca15361bac85921..e95a9e1a889f19c4673d9735e5e932eb83340767 100644 (file)
@@ -715,6 +715,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
                hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
                /* Disable all interrupts in bus-off to avoid int hog */
                hecc_write(priv, HECC_CANGIM, 0);
+               ++priv->can.can_stats.bus_off;
                can_bus_off(ndev);
        }
 
index a77db919363c08baa2c76dbb35c60701ea2c68ff..bcb272f6c68a9ac242964af7901bf6597a499306 100644 (file)
@@ -25,7 +25,7 @@ config CAN_KVASER_USB
        tristate "Kvaser CAN/USB interface"
        ---help---
          This driver adds support for Kvaser CAN/USB devices like Kvaser
-         Leaf Light.
+         Leaf Light and Kvaser USBcan II.
 
          The driver provides support for the following devices:
            - Kvaser Leaf Light
@@ -46,6 +46,12 @@ config CAN_KVASER_USB
            - Kvaser USBcan R
            - Kvaser Leaf Light v2
            - Kvaser Mini PCI Express HS
+           - Kvaser USBcan II HS/HS
+           - Kvaser USBcan II HS/LS
+           - Kvaser USBcan Rugged ("USBcan Rev B")
+           - Kvaser Memorator HS/HS
+           - Kvaser Memorator HS/LS
+           - Scania VCI2 (if you have the Kvaser logo on top)
 
          If unsure, say N.
 
@@ -53,10 +59,18 @@ config CAN_KVASER_USB
          module will be called kvaser_usb.
 
 config CAN_PEAK_USB
-       tristate "PEAK PCAN-USB/USB Pro interfaces"
+       tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
        ---help---
-         This driver supports the PCAN-USB and PCAN-USB Pro adapters
-         from PEAK-System Technik (http://www.peak-system.com).
+         This driver supports the PEAK-System Technik USB adapters that enable
+         access to the CAN bus, with repect to the CAN 2.0b and/or CAN-FD
+         standards, that is:
+
+         PCAN-USB             single CAN 2.0b channel USB adapter
+         PCAN-USB Pro         dual CAN 2.0b channels USB adapter
+         PCAN-USB FD          single CAN-FD channel USB adapter
+         PCAN-USB Pro FD      dual CAN-FD channels USB adapter
+
+         (see also http://www.peak-system.com).
 
 config CAN_8DEV_USB
        tristate "8 devices USB2CAN interface"
index 29d3f0938eb836b4da53a0898f574ebf42721ae2..9376f5e5b94ed2956c85808c5642ccbdb34036bd 100644 (file)
@@ -347,6 +347,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
                        dev->can.state = CAN_STATE_BUS_OFF;
                        cf->can_id |= CAN_ERR_BUSOFF;
 
+                       dev->can.can_stats.bus_off++;
                        can_bus_off(dev->netdev);
                } else if (state & SJA1000_SR_ES) {
                        dev->can.state = CAN_STATE_ERROR_WARNING;
index c063a54ab8dd8a598f36e5a7e712722bb5931df1..bacca0bd89c1ffc515bb352b73d4801de30c9f4a 100644 (file)
@@ -250,6 +250,7 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
                        case ESD_BUSSTATE_BUSOFF:
                                priv->can.state = CAN_STATE_BUS_OFF;
                                cf->can_id |= CAN_ERR_BUSOFF;
+                               priv->can.can_stats.bus_off++;
                                can_bus_off(priv->netdev);
                                break;
                        case ESD_BUSSTATE_WARN:
index 541fb7a05625aaf889089704ec155956629e77c8..2928f7003041d92c099d31d19621f1a429ea0848 100644 (file)
@@ -6,10 +6,12 @@
  * Parts of this driver are based on the following:
  *  - Kvaser linux leaf driver (version 4.78)
  *  - CAN driver for esd CAN-USB/2
+ *  - Kvaser linux usbcanII driver (version 5.3)
  *
  * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
  * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
  * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
  */
 
 #include <linux/completion.h>
@@ -30,8 +32,9 @@
 #define RX_BUFFER_SIZE                 3072
 #define CAN_USB_CLOCK                  8000000
 #define MAX_NET_DEVICES                        3
+#define MAX_USBCAN_NET_DEVICES         2
 
-/* Kvaser USB devices */
+/* Kvaser Leaf USB devices */
 #define KVASER_VENDOR_ID               0x0bfd
 #define USB_LEAF_DEVEL_PRODUCT_ID      10
 #define USB_LEAF_LITE_PRODUCT_ID       11
 #define USB_LEAF_LITE_V2_PRODUCT_ID    288
 #define USB_MINI_PCIE_HS_PRODUCT_ID    289
 
+static inline bool kvaser_is_leaf(const struct usb_device_id *id)
+{
+       return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
+              id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
+}
+
+/* Kvaser USBCan-II devices */
+#define USB_USBCAN_REVB_PRODUCT_ID     2
+#define USB_VCI2_PRODUCT_ID            3
+#define USB_USBCAN2_PRODUCT_ID         4
+#define USB_MEMORATOR_PRODUCT_ID       5
+
+static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
+{
+       return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
+              id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
+}
+
 /* USB devices features */
 #define KVASER_HAS_SILENT_MODE         BIT(0)
 #define KVASER_HAS_TXRX_ERRORS         BIT(1)
@@ -73,7 +94,7 @@
 #define MSG_FLAG_TX_ACK                        BIT(6)
 #define MSG_FLAG_TX_REQUEST            BIT(7)
 
-/* Can states */
+/* Can states (M16C CxSTRH register) */
 #define M16C_STATE_BUS_RESET           BIT(0)
 #define M16C_STATE_BUS_ERROR           BIT(4)
 #define M16C_STATE_BUS_PASSIVE         BIT(5)
 #define CMD_START_CHIP_REPLY           27
 #define CMD_STOP_CHIP                  28
 #define CMD_STOP_CHIP_REPLY            29
-#define CMD_GET_CARD_INFO2             32
+
+#define CMD_LEAF_GET_CARD_INFO2                32
+#define CMD_USBCAN_RESET_CLOCK         32
+#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT        33
+
 #define CMD_GET_CARD_INFO              34
 #define CMD_GET_CARD_INFO_REPLY                35
 #define CMD_GET_SOFTWARE_INFO          38
 #define CMD_RESET_ERROR_COUNTER                49
 #define CMD_TX_ACKNOWLEDGE             50
 #define CMD_CAN_ERROR_EVENT            51
-#define CMD_USB_THROTTLE               77
-#define CMD_LOG_MESSAGE                        106
+
+#define CMD_LEAF_USB_THROTTLE          77
+#define CMD_LEAF_LOG_MESSAGE           106
 
 /* error factors */
 #define M16C_EF_ACKE                   BIT(0)
 #define M16C_EF_RCVE                   BIT(6)
 #define M16C_EF_TRE                    BIT(7)
 
+/* Only Leaf-based devices can report M16C error factors,
+ * thus define our own error status flags for USBCANII
+ */
+#define USBCAN_ERROR_STATE_NONE                0
+#define USBCAN_ERROR_STATE_TX_ERROR    BIT(0)
+#define USBCAN_ERROR_STATE_RX_ERROR    BIT(1)
+#define USBCAN_ERROR_STATE_BUSERROR    BIT(2)
+
 /* bittiming parameters */
 #define KVASER_USB_TSEG1_MIN           1
 #define KVASER_USB_TSEG1_MAX           16
 #define KVASER_CTRL_MODE_SELFRECEPTION 3
 #define KVASER_CTRL_MODE_OFF           4
 
-/* log message */
+/* Extended CAN identifier flag */
 #define KVASER_EXTENDED_FRAME          BIT(31)
 
+/* Kvaser USB CAN dongles are divided into two major families:
+ * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo'
+ * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
+ */
+enum kvaser_usb_family {
+       KVASER_LEAF,
+       KVASER_USBCAN,
+};
+
 struct kvaser_msg_simple {
        u8 tid;
        u8 channel;
@@ -148,30 +191,55 @@ struct kvaser_msg_simple {
 struct kvaser_msg_cardinfo {
        u8 tid;
        u8 nchannels;
-       __le32 serial_number;
-       __le32 padding;
+       union {
+               struct {
+                       __le32 serial_number;
+                       __le32 padding;
+               } __packed leaf0;
+               struct {
+                       __le32 serial_number_low;
+                       __le32 serial_number_high;
+               } __packed usbcan0;
+       } __packed;
        __le32 clock_resolution;
        __le32 mfgdate;
        u8 ean[8];
        u8 hw_revision;
-       u8 usb_hs_mode;
-       __le16 padding2;
+       union {
+               struct {
+                       u8 usb_hs_mode;
+               } __packed leaf1;
+               struct {
+                       u8 padding;
+               } __packed usbcan1;
+       } __packed;
+       __le16 padding;
 } __packed;
 
 struct kvaser_msg_cardinfo2 {
        u8 tid;
-       u8 channel;
+       u8 reserved;
        u8 pcb_id[24];
        __le32 oem_unlock_code;
 } __packed;
 
-struct kvaser_msg_softinfo {
+struct leaf_msg_softinfo {
        u8 tid;
-       u8 channel;
+       u8 padding0;
        __le32 sw_options;
        __le32 fw_version;
        __le16 max_outstanding_tx;
-       __le16 padding[9];
+       __le16 padding1[9];
+} __packed;
+
+struct usbcan_msg_softinfo {
+       u8 tid;
+       u8 fw_name[5];
+       __le16 max_outstanding_tx;
+       u8 padding[6];
+       __le32 fw_version;
+       __le16 checksum;
+       __le16 sw_options;
 } __packed;
 
 struct kvaser_msg_busparams {
@@ -188,36 +256,86 @@ struct kvaser_msg_tx_can {
        u8 channel;
        u8 tid;
        u8 msg[14];
-       u8 padding;
-       u8 flags;
+       union {
+               struct {
+                       u8 padding;
+                       u8 flags;
+               } __packed leaf;
+               struct {
+                       u8 flags;
+                       u8 padding;
+               } __packed usbcan;
+       } __packed;
 } __packed;
 
-struct kvaser_msg_rx_can {
+struct kvaser_msg_rx_can_header {
        u8 channel;
        u8 flag;
+} __packed;
+
+struct leaf_msg_rx_can {
+       u8 channel;
+       u8 flag;
+
        __le16 time[3];
        u8 msg[14];
 } __packed;
 
-struct kvaser_msg_chip_state_event {
+struct usbcan_msg_rx_can {
+       u8 channel;
+       u8 flag;
+
+       u8 msg[14];
+       __le16 time;
+} __packed;
+
+struct leaf_msg_chip_state_event {
        u8 tid;
        u8 channel;
+
        __le16 time[3];
        u8 tx_errors_count;
        u8 rx_errors_count;
+
        u8 status;
        u8 padding[3];
 } __packed;
 
-struct kvaser_msg_tx_acknowledge {
+struct usbcan_msg_chip_state_event {
+       u8 tid;
+       u8 channel;
+
+       u8 tx_errors_count;
+       u8 rx_errors_count;
+       __le16 time;
+
+       u8 status;
+       u8 padding[3];
+} __packed;
+
+struct kvaser_msg_tx_acknowledge_header {
        u8 channel;
        u8 tid;
+} __packed;
+
+struct leaf_msg_tx_acknowledge {
+       u8 channel;
+       u8 tid;
+
        __le16 time[3];
        u8 flags;
        u8 time_offset;
 } __packed;
 
-struct kvaser_msg_error_event {
+struct usbcan_msg_tx_acknowledge {
+       u8 channel;
+       u8 tid;
+
+       __le16 time;
+       __le16 padding;
+} __packed;
+
+struct leaf_msg_error_event {
        u8 tid;
        u8 flags;
        __le16 time[3];
@@ -229,6 +347,18 @@ struct kvaser_msg_error_event {
        u8 error_factor;
 } __packed;
 
+struct usbcan_msg_error_event {
+       u8 tid;
+       u8 padding;
+       u8 tx_errors_count_ch0;
+       u8 rx_errors_count_ch0;
+       u8 tx_errors_count_ch1;
+       u8 rx_errors_count_ch1;
+       u8 status_ch0;
+       u8 status_ch1;
+       __le16 time;
+} __packed;
+
 struct kvaser_msg_ctrl_mode {
        u8 tid;
        u8 channel;
@@ -243,7 +373,7 @@ struct kvaser_msg_flush_queue {
        u8 padding[3];
 } __packed;
 
-struct kvaser_msg_log_message {
+struct leaf_msg_log_message {
        u8 channel;
        u8 flags;
        __le16 time[3];
@@ -260,19 +390,57 @@ struct kvaser_msg {
                struct kvaser_msg_simple simple;
                struct kvaser_msg_cardinfo cardinfo;
                struct kvaser_msg_cardinfo2 cardinfo2;
-               struct kvaser_msg_softinfo softinfo;
                struct kvaser_msg_busparams busparams;
+
+               struct kvaser_msg_rx_can_header rx_can_header;
+               struct kvaser_msg_tx_acknowledge_header tx_acknowledge_header;
+
+               union {
+                       struct leaf_msg_softinfo softinfo;
+                       struct leaf_msg_rx_can rx_can;
+                       struct leaf_msg_chip_state_event chip_state_event;
+                       struct leaf_msg_tx_acknowledge tx_acknowledge;
+                       struct leaf_msg_error_event error_event;
+                       struct leaf_msg_log_message log_message;
+               } __packed leaf;
+
+               union {
+                       struct usbcan_msg_softinfo softinfo;
+                       struct usbcan_msg_rx_can rx_can;
+                       struct usbcan_msg_chip_state_event chip_state_event;
+                       struct usbcan_msg_tx_acknowledge tx_acknowledge;
+                       struct usbcan_msg_error_event error_event;
+               } __packed usbcan;
+
                struct kvaser_msg_tx_can tx_can;
-               struct kvaser_msg_rx_can rx_can;
-               struct kvaser_msg_chip_state_event chip_state_event;
-               struct kvaser_msg_tx_acknowledge tx_acknowledge;
-               struct kvaser_msg_error_event error_event;
                struct kvaser_msg_ctrl_mode ctrl_mode;
                struct kvaser_msg_flush_queue flush_queue;
-               struct kvaser_msg_log_message log_message;
        } u;
 } __packed;
 
+/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
+ * handling. Some discrepancies between the two families exist:
+ *
+ * - USBCAN firmware does not report M16C "error factors"
+ * - USBCAN controllers has difficulties reporting if the raised error
+ *   event is for ch0 or ch1. They leave such arbitration to the OS
+ *   driver by letting it compare error counters with previous values
+ *   and decide the error event's channel. Thus for USBCAN, the channel
+ *   field is only advisory.
+ */
+struct kvaser_usb_error_summary {
+       u8 channel, status, txerr, rxerr;
+       union {
+               struct {
+                       u8 error_factor;
+               } leaf;
+               struct {
+                       u8 other_ch_status;
+                       u8 error_state;
+               } usbcan;
+       };
+};
+
 struct kvaser_usb_tx_urb_context {
        struct kvaser_usb_net_priv *priv;
        u32 echo_index;
@@ -288,6 +456,7 @@ struct kvaser_usb {
 
        u32 fw_version;
        unsigned int nchannels;
+       enum kvaser_usb_family family;
 
        bool rxinitdone;
        void *rxbuf[MAX_RX_URBS];
@@ -311,6 +480,7 @@ struct kvaser_usb_net_priv {
 };
 
 static const struct usb_device_id kvaser_usb_table[] = {
+       /* Leaf family IDs */
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
@@ -360,6 +530,17 @@ static const struct usb_device_id kvaser_usb_table[] = {
                .driver_info = KVASER_HAS_TXRX_ERRORS },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+
+       /* USBCANII family IDs */
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
+               .driver_info = KVASER_HAS_TXRX_ERRORS },
+
        { }
 };
 MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -463,7 +644,14 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
        if (err)
                return err;
 
-       dev->fw_version = le32_to_cpu(msg.u.softinfo.fw_version);
+       switch (dev->family) {
+       case KVASER_LEAF:
+               dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
+               break;
+       case KVASER_USBCAN:
+               dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
+               break;
+       }
 
        return 0;
 }
@@ -482,7 +670,9 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
                return err;
 
        dev->nchannels = msg.u.cardinfo.nchannels;
-       if (dev->nchannels > MAX_NET_DEVICES)
+       if ((dev->nchannels > MAX_NET_DEVICES) ||
+           (dev->family == KVASER_USBCAN &&
+            dev->nchannels > MAX_USBCAN_NET_DEVICES))
                return -EINVAL;
 
        return 0;
@@ -496,8 +686,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
        struct kvaser_usb_net_priv *priv;
        struct sk_buff *skb;
        struct can_frame *cf;
-       u8 channel = msg->u.tx_acknowledge.channel;
-       u8 tid = msg->u.tx_acknowledge.tid;
+       u8 channel, tid;
+
+       channel = msg->u.tx_acknowledge_header.channel;
+       tid = msg->u.tx_acknowledge_header.tid;
 
        if (channel >= dev->nchannels) {
                dev_err(dev->udev->dev.parent,
@@ -520,10 +712,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
                skb = alloc_can_err_skb(priv->netdev, &cf);
                if (skb) {
                        cf->can_id |= CAN_ERR_RESTARTED;
-                       netif_rx(skb);
 
                        stats->rx_packets++;
                        stats->rx_bytes += cf->can_dlc;
+                       netif_rx(skb);
                } else {
                        netdev_err(priv->netdev,
                                   "No memory left for err_skb\n");
@@ -587,7 +779,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
                          usb_sndbulkpipe(dev->udev,
                                          dev->bulk_out->bEndpointAddress),
                          buf, msg->len,
-                         kvaser_usb_simple_msg_callback, priv);
+                         kvaser_usb_simple_msg_callback, netdev);
        usb_anchor_urb(urb, &priv->tx_submitted);
 
        err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -615,165 +807,280 @@ static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
                priv->tx_contexts[i].echo_index = MAX_TX_URBS;
 }
 
-static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
-                               const struct kvaser_msg *msg)
+static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+                                                const struct kvaser_usb_error_summary *es,
+                                                struct can_frame *cf)
 {
-       struct can_frame *cf;
-       struct sk_buff *skb;
-       struct net_device_stats *stats;
-       struct kvaser_usb_net_priv *priv;
-       unsigned int new_state;
-       u8 channel, status, txerr, rxerr, error_factor;
+       struct kvaser_usb *dev = priv->dev;
+       struct net_device_stats *stats = &priv->netdev->stats;
+       enum can_state cur_state, new_state, tx_state, rx_state;
 
-       switch (msg->id) {
-       case CMD_CAN_ERROR_EVENT:
-               channel = msg->u.error_event.channel;
-               status =  msg->u.error_event.status;
-               txerr = msg->u.error_event.tx_errors_count;
-               rxerr = msg->u.error_event.rx_errors_count;
-               error_factor = msg->u.error_event.error_factor;
-               break;
-       case CMD_LOG_MESSAGE:
-               channel = msg->u.log_message.channel;
-               status = msg->u.log_message.data[0];
-               txerr = msg->u.log_message.data[2];
-               rxerr = msg->u.log_message.data[3];
-               error_factor = msg->u.log_message.data[1];
+       netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
+
+       new_state = cur_state = priv->can.state;
+
+       if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET))
+               new_state = CAN_STATE_BUS_OFF;
+       else if (es->status & M16C_STATE_BUS_PASSIVE)
+               new_state = CAN_STATE_ERROR_PASSIVE;
+       else if (es->status & M16C_STATE_BUS_ERROR) {
+               /* Guard against spurious error events after a busoff */
+               if (cur_state < CAN_STATE_BUS_OFF) {
+                       if ((es->txerr >= 128) || (es->rxerr >= 128))
+                               new_state = CAN_STATE_ERROR_PASSIVE;
+                       else if ((es->txerr >= 96) || (es->rxerr >= 96))
+                               new_state = CAN_STATE_ERROR_WARNING;
+                       else if (cur_state > CAN_STATE_ERROR_ACTIVE)
+                               new_state = CAN_STATE_ERROR_ACTIVE;
+               }
+       }
+
+       if (!es->status)
+               new_state = CAN_STATE_ERROR_ACTIVE;
+
+       if (new_state != cur_state) {
+               tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
+               rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
+
+               can_change_state(priv->netdev, cf, tx_state, rx_state);
+       }
+
+       if (priv->can.restart_ms &&
+           (cur_state >= CAN_STATE_BUS_OFF) &&
+           (new_state < CAN_STATE_BUS_OFF)) {
+               priv->can.can_stats.restarts++;
+       }
+
+       switch (dev->family) {
+       case KVASER_LEAF:
+               if (es->leaf.error_factor) {
+                       priv->can.can_stats.bus_error++;
+                       stats->rx_errors++;
+               }
                break;
-       case CMD_CHIP_STATE_EVENT:
-               channel = msg->u.chip_state_event.channel;
-               status =  msg->u.chip_state_event.status;
-               txerr = msg->u.chip_state_event.tx_errors_count;
-               rxerr = msg->u.chip_state_event.rx_errors_count;
-               error_factor = 0;
+       case KVASER_USBCAN:
+               if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
+                       stats->tx_errors++;
+               if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
+                       stats->rx_errors++;
+               if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
+                       priv->can.can_stats.bus_error++;
+               }
                break;
-       default:
-               dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
-                       msg->id);
-               return;
        }
 
-       if (channel >= dev->nchannels) {
+       priv->bec.txerr = es->txerr;
+       priv->bec.rxerr = es->rxerr;
+}
+
+static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+                               const struct kvaser_usb_error_summary *es)
+{
+       struct can_frame *cf, tmp_cf = { .can_id = CAN_ERR_FLAG, .can_dlc = CAN_ERR_DLC };
+       struct sk_buff *skb;
+       struct net_device_stats *stats;
+       struct kvaser_usb_net_priv *priv;
+       enum can_state old_state, new_state;
+
+       if (es->channel >= dev->nchannels) {
                dev_err(dev->udev->dev.parent,
-                       "Invalid channel number (%d)\n", channel);
+                       "Invalid channel number (%d)\n", es->channel);
                return;
        }
 
-       priv = dev->nets[channel];
+       priv = dev->nets[es->channel];
        stats = &priv->netdev->stats;
 
-       if (status & M16C_STATE_BUS_RESET) {
-               kvaser_usb_unlink_tx_urbs(priv);
-               return;
-       }
+       /* Update all of the can interface's state and error counters before
+        * trying any memory allocation that can actually fail with -ENOMEM.
+        *
+        * We send a temporary stack-allocated error can frame to
+        * can_change_state() for the very same reason.
+        *
+        * TODO: Split can_change_state() responsibility between updating the
+        * can interface's state and counters, and the setting up of can error
+        * frame ID and data to userspace. Remove stack allocation afterwards.
+        */
+       old_state = priv->can.state;
+       kvaser_usb_rx_error_update_can_state(priv, es, &tmp_cf);
+       new_state = priv->can.state;
 
        skb = alloc_can_err_skb(priv->netdev, &cf);
        if (!skb) {
                stats->rx_dropped++;
                return;
        }
-
-       new_state = priv->can.state;
-
-       netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
-
-       if (status & M16C_STATE_BUS_OFF) {
-               cf->can_id |= CAN_ERR_BUSOFF;
-
-               priv->can.can_stats.bus_off++;
-               if (!priv->can.restart_ms)
-                       kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
-
-               netif_carrier_off(priv->netdev);
-
-               new_state = CAN_STATE_BUS_OFF;
-       } else if (status & M16C_STATE_BUS_PASSIVE) {
-               if (priv->can.state != CAN_STATE_ERROR_PASSIVE) {
-                       cf->can_id |= CAN_ERR_CRTL;
-
-                       if (txerr || rxerr)
-                               cf->data[1] = (txerr > rxerr)
-                                               ? CAN_ERR_CRTL_TX_PASSIVE
-                                               : CAN_ERR_CRTL_RX_PASSIVE;
-                       else
-                               cf->data[1] = CAN_ERR_CRTL_TX_PASSIVE |
-                                             CAN_ERR_CRTL_RX_PASSIVE;
-
-                       priv->can.can_stats.error_passive++;
+       memcpy(cf, &tmp_cf, sizeof(*cf));
+
+       if (new_state != old_state) {
+               if (es->status &
+                   (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+                       if (!priv->can.restart_ms)
+                               kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
+                       netif_carrier_off(priv->netdev);
                }
 
-               new_state = CAN_STATE_ERROR_PASSIVE;
+               if (priv->can.restart_ms &&
+                   (old_state >= CAN_STATE_BUS_OFF) &&
+                   (new_state < CAN_STATE_BUS_OFF)) {
+                       cf->can_id |= CAN_ERR_RESTARTED;
+                       netif_carrier_on(priv->netdev);
+               }
        }
 
-       if (status == M16C_STATE_BUS_ERROR) {
-               if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
-                   ((txerr >= 96) || (rxerr >= 96))) {
-                       cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] = (txerr > rxerr)
-                                       ? CAN_ERR_CRTL_TX_WARNING
-                                       : CAN_ERR_CRTL_RX_WARNING;
-
-                       priv->can.can_stats.error_warning++;
-                       new_state = CAN_STATE_ERROR_WARNING;
-               } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
-                       cf->can_id |= CAN_ERR_PROT;
-                       cf->data[2] = CAN_ERR_PROT_ACTIVE;
-
-                       new_state = CAN_STATE_ERROR_ACTIVE;
+       switch (dev->family) {
+       case KVASER_LEAF:
+               if (es->leaf.error_factor) {
+                       cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+
+                       if (es->leaf.error_factor & M16C_EF_ACKE)
+                               cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
+                       if (es->leaf.error_factor & M16C_EF_CRCE)
+                               cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+                                               CAN_ERR_PROT_LOC_CRC_DEL);
+                       if (es->leaf.error_factor & M16C_EF_FORME)
+                               cf->data[2] |= CAN_ERR_PROT_FORM;
+                       if (es->leaf.error_factor & M16C_EF_STFE)
+                               cf->data[2] |= CAN_ERR_PROT_STUFF;
+                       if (es->leaf.error_factor & M16C_EF_BITE0)
+                               cf->data[2] |= CAN_ERR_PROT_BIT0;
+                       if (es->leaf.error_factor & M16C_EF_BITE1)
+                               cf->data[2] |= CAN_ERR_PROT_BIT1;
+                       if (es->leaf.error_factor & M16C_EF_TRE)
+                               cf->data[2] |= CAN_ERR_PROT_TX;
+               }
+               break;
+       case KVASER_USBCAN:
+               if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
+                       cf->can_id |= CAN_ERR_BUSERROR;
                }
+               break;
        }
 
-       if (!status) {
-               cf->can_id |= CAN_ERR_PROT;
-               cf->data[2] = CAN_ERR_PROT_ACTIVE;
+       cf->data[6] = es->txerr;
+       cf->data[7] = es->rxerr;
 
-               new_state = CAN_STATE_ERROR_ACTIVE;
-       }
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
+}
 
-       if (priv->can.restart_ms &&
-           (priv->can.state >= CAN_STATE_BUS_OFF) &&
-           (new_state < CAN_STATE_BUS_OFF)) {
-               cf->can_id |= CAN_ERR_RESTARTED;
-               netif_carrier_on(priv->netdev);
+/* For USBCAN, report error to userspace iff the channels's errors counter
+ * has changed, or we're the only channel seeing a bus error state.
+ */
+static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
+                                                struct kvaser_usb_error_summary *es)
+{
+       struct kvaser_usb_net_priv *priv;
+       int channel;
+       bool report_error;
 
-               priv->can.can_stats.restarts++;
+       channel = es->channel;
+       if (channel >= dev->nchannels) {
+               dev_err(dev->udev->dev.parent,
+                       "Invalid channel number (%d)\n", channel);
+               return;
        }
 
-       if (error_factor) {
-               priv->can.can_stats.bus_error++;
-               stats->rx_errors++;
+       priv = dev->nets[channel];
+       report_error = false;
 
-               cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-
-               if (error_factor & M16C_EF_ACKE)
-                       cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
-               if (error_factor & M16C_EF_CRCE)
-                       cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
-                                       CAN_ERR_PROT_LOC_CRC_DEL);
-               if (error_factor & M16C_EF_FORME)
-                       cf->data[2] |= CAN_ERR_PROT_FORM;
-               if (error_factor & M16C_EF_STFE)
-                       cf->data[2] |= CAN_ERR_PROT_STUFF;
-               if (error_factor & M16C_EF_BITE0)
-                       cf->data[2] |= CAN_ERR_PROT_BIT0;
-               if (error_factor & M16C_EF_BITE1)
-                       cf->data[2] |= CAN_ERR_PROT_BIT1;
-               if (error_factor & M16C_EF_TRE)
-                       cf->data[2] |= CAN_ERR_PROT_TX;
+       if (es->txerr != priv->bec.txerr) {
+               es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
+               report_error = true;
+       }
+       if (es->rxerr != priv->bec.rxerr) {
+               es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
+               report_error = true;
+       }
+       if ((es->status & M16C_STATE_BUS_ERROR) &&
+           !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
+               es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
+               report_error = true;
        }
 
-       cf->data[6] = txerr;
-       cf->data[7] = rxerr;
+       if (report_error)
+               kvaser_usb_rx_error(dev, es);
+}
 
-       priv->bec.txerr = txerr;
-       priv->bec.rxerr = rxerr;
+static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev,
+                                  const struct kvaser_msg *msg)
+{
+       struct kvaser_usb_error_summary es = { };
 
-       priv->can.state = new_state;
+       switch (msg->id) {
+       /* Sometimes errors are sent as unsolicited chip state events */
+       case CMD_CHIP_STATE_EVENT:
+               es.channel = msg->u.usbcan.chip_state_event.channel;
+               es.status =  msg->u.usbcan.chip_state_event.status;
+               es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count;
+               es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count;
+               kvaser_usbcan_conditionally_rx_error(dev, &es);
+               break;
 
-       netif_rx(skb);
+       case CMD_CAN_ERROR_EVENT:
+               es.channel = 0;
+               es.status = msg->u.usbcan.error_event.status_ch0;
+               es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0;
+               es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0;
+               es.usbcan.other_ch_status =
+                       msg->u.usbcan.error_event.status_ch1;
+               kvaser_usbcan_conditionally_rx_error(dev, &es);
+
+               /* The USBCAN firmware supports up to 2 channels.
+                * Now that ch0 was checked, check if ch1 has any errors.
+                */
+               if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
+                       es.channel = 1;
+                       es.status = msg->u.usbcan.error_event.status_ch1;
+                       es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1;
+                       es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1;
+                       es.usbcan.other_ch_status =
+                               msg->u.usbcan.error_event.status_ch0;
+                       kvaser_usbcan_conditionally_rx_error(dev, &es);
+               }
+               break;
 
-       stats->rx_packets++;
-       stats->rx_bytes += cf->can_dlc;
+       default:
+               dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
+                       msg->id);
+       }
+}
+
+static void kvaser_leaf_rx_error(const struct kvaser_usb *dev,
+                                const struct kvaser_msg *msg)
+{
+       struct kvaser_usb_error_summary es = { };
+
+       switch (msg->id) {
+       case CMD_CAN_ERROR_EVENT:
+               es.channel = msg->u.leaf.error_event.channel;
+               es.status =  msg->u.leaf.error_event.status;
+               es.txerr = msg->u.leaf.error_event.tx_errors_count;
+               es.rxerr = msg->u.leaf.error_event.rx_errors_count;
+               es.leaf.error_factor = msg->u.leaf.error_event.error_factor;
+               break;
+       case CMD_LEAF_LOG_MESSAGE:
+               es.channel = msg->u.leaf.log_message.channel;
+               es.status = msg->u.leaf.log_message.data[0];
+               es.txerr = msg->u.leaf.log_message.data[2];
+               es.rxerr = msg->u.leaf.log_message.data[3];
+               es.leaf.error_factor = msg->u.leaf.log_message.data[1];
+               break;
+       case CMD_CHIP_STATE_EVENT:
+               es.channel = msg->u.leaf.chip_state_event.channel;
+               es.status =  msg->u.leaf.chip_state_event.status;
+               es.txerr = msg->u.leaf.chip_state_event.tx_errors_count;
+               es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count;
+               es.leaf.error_factor = 0;
+               break;
+       default:
+               dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
+                       msg->id);
+               return;
+       }
+
+       kvaser_usb_rx_error(dev, &es);
 }
 
 static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
@@ -783,16 +1090,19 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
        struct sk_buff *skb;
        struct net_device_stats *stats = &priv->netdev->stats;
 
-       if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
+       if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
                                         MSG_FLAG_NERR)) {
                netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
-                          msg->u.rx_can.flag);
+                          msg->u.rx_can_header.flag);
 
                stats->rx_errors++;
                return;
        }
 
-       if (msg->u.rx_can.flag & MSG_FLAG_OVERRUN) {
+       if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) {
+               stats->rx_over_errors++;
+               stats->rx_errors++;
+
                skb = alloc_can_err_skb(priv->netdev, &cf);
                if (!skb) {
                        stats->rx_dropped++;
@@ -802,13 +1112,9 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
                cf->can_id |= CAN_ERR_CRTL;
                cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
 
-               stats->rx_over_errors++;
-               stats->rx_errors++;
-
-               netif_rx(skb);
-
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
        }
 }
 
@@ -819,7 +1125,8 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
        struct can_frame *cf;
        struct sk_buff *skb;
        struct net_device_stats *stats;
-       u8 channel = msg->u.rx_can.channel;
+       u8 channel = msg->u.rx_can_header.channel;
+       const u8 *rx_msg = NULL;        /* GCC */
 
        if (channel >= dev->nchannels) {
                dev_err(dev->udev->dev.parent,
@@ -830,67 +1137,74 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
        priv = dev->nets[channel];
        stats = &priv->netdev->stats;
 
-       if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) &&
-           (msg->id == CMD_LOG_MESSAGE)) {
-               kvaser_usb_rx_error(dev, msg);
+       if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
+           (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) {
+               kvaser_leaf_rx_error(dev, msg);
                return;
-       } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
-                                        MSG_FLAG_NERR |
-                                        MSG_FLAG_OVERRUN)) {
+       } else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
+                                               MSG_FLAG_NERR |
+                                               MSG_FLAG_OVERRUN)) {
                kvaser_usb_rx_can_err(priv, msg);
                return;
-       } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
+       } else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
                netdev_warn(priv->netdev,
                            "Unhandled frame (flags: 0x%02x)",
-                           msg->u.rx_can.flag);
+                           msg->u.rx_can_header.flag);
                return;
        }
 
+       switch (dev->family) {
+       case KVASER_LEAF:
+               rx_msg = msg->u.leaf.rx_can.msg;
+               break;
+       case KVASER_USBCAN:
+               rx_msg = msg->u.usbcan.rx_can.msg;
+               break;
+       }
+
        skb = alloc_can_skb(priv->netdev, &cf);
        if (!skb) {
                stats->tx_dropped++;
                return;
        }
 
-       if (msg->id == CMD_LOG_MESSAGE) {
-               cf->can_id = le32_to_cpu(msg->u.log_message.id);
+       if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) {
+               cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id);
                if (cf->can_id & KVASER_EXTENDED_FRAME)
                        cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
                else
                        cf->can_id &= CAN_SFF_MASK;
 
-               cf->can_dlc = get_can_dlc(msg->u.log_message.dlc);
+               cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc);
 
-               if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME)
+               if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
                        cf->can_id |= CAN_RTR_FLAG;
                else
-                       memcpy(cf->data, &msg->u.log_message.data,
+                       memcpy(cf->data, &msg->u.leaf.log_message.data,
                               cf->can_dlc);
        } else {
-               cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
-                            (msg->u.rx_can.msg[1] & 0x3f);
+               cf->can_id = ((rx_msg[0] & 0x1f) << 6) | (rx_msg[1] & 0x3f);
 
                if (msg->id == CMD_RX_EXT_MESSAGE) {
                        cf->can_id <<= 18;
-                       cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
-                                     ((msg->u.rx_can.msg[3] & 0xff) << 6) |
-                                     (msg->u.rx_can.msg[4] & 0x3f);
+                       cf->can_id |= ((rx_msg[2] & 0x0f) << 14) |
+                                     ((rx_msg[3] & 0xff) << 6) |
+                                     (rx_msg[4] & 0x3f);
                        cf->can_id |= CAN_EFF_FLAG;
                }
 
-               cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+               cf->can_dlc = get_can_dlc(rx_msg[5]);
 
-               if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
+               if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
                        cf->can_id |= CAN_RTR_FLAG;
                else
-                       memcpy(cf->data, &msg->u.rx_can.msg[6],
+                       memcpy(cf->data, &rx_msg[6],
                               cf->can_dlc);
        }
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
@@ -947,21 +1261,35 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
 
        case CMD_RX_STD_MESSAGE:
        case CMD_RX_EXT_MESSAGE:
-       case CMD_LOG_MESSAGE:
+               kvaser_usb_rx_can_msg(dev, msg);
+               break;
+
+       case CMD_LEAF_LOG_MESSAGE:
+               if (dev->family != KVASER_LEAF)
+                       goto warn;
                kvaser_usb_rx_can_msg(dev, msg);
                break;
 
        case CMD_CHIP_STATE_EVENT:
        case CMD_CAN_ERROR_EVENT:
-               kvaser_usb_rx_error(dev, msg);
+               if (dev->family == KVASER_LEAF)
+                       kvaser_leaf_rx_error(dev, msg);
+               else
+                       kvaser_usbcan_rx_error(dev, msg);
                break;
 
        case CMD_TX_ACKNOWLEDGE:
                kvaser_usb_tx_acknowledge(dev, msg);
                break;
 
+       /* Ignored messages */
+       case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
+               if (dev->family != KVASER_USBCAN)
+                       goto warn;
+               break;
+
        default:
-               dev_warn(dev->udev->dev.parent,
+warn:          dev_warn(dev->udev->dev.parent,
                         "Unhandled message (%d)\n", msg->id);
                break;
        }
@@ -1181,7 +1509,7 @@ static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
                                  dev->rxbuf[i],
                                  dev->rxbuf_dma[i]);
 
-       for (i = 0; i < MAX_NET_DEVICES; i++) {
+       for (i = 0; i < dev->nchannels; i++) {
                struct kvaser_usb_net_priv *priv = dev->nets[i];
 
                if (priv)
@@ -1246,6 +1574,9 @@ static int kvaser_usb_close(struct net_device *netdev)
        if (err)
                netdev_warn(netdev, "Cannot stop device, error %d\n", err);
 
+       /* reset tx contexts */
+       kvaser_usb_unlink_tx_urbs(priv);
+
        priv->can.state = CAN_STATE_STOPPED;
        close_candev(priv->netdev);
 
@@ -1286,6 +1617,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
        struct kvaser_msg *msg;
        int i, err;
        int ret = NETDEV_TX_OK;
+       u8 *msg_tx_can_flags = NULL;            /* GCC */
 
        if (can_dropped_invalid_skb(netdev, skb))
                return NETDEV_TX_OK;
@@ -1294,20 +1626,32 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
        if (!urb) {
                netdev_err(netdev, "No memory left for URBs\n");
                stats->tx_dropped++;
-               goto nourbmem;
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
        }
 
        buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
        if (!buf) {
                stats->tx_dropped++;
+               dev_kfree_skb(skb);
                goto nobufmem;
        }
 
        msg = buf;
        msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
-       msg->u.tx_can.flags = 0;
        msg->u.tx_can.channel = priv->channel;
 
+       switch (dev->family) {
+       case KVASER_LEAF:
+               msg_tx_can_flags = &msg->u.tx_can.leaf.flags;
+               break;
+       case KVASER_USBCAN:
+               msg_tx_can_flags = &msg->u.tx_can.usbcan.flags;
+               break;
+       }
+
+       *msg_tx_can_flags = 0;
+
        if (cf->can_id & CAN_EFF_FLAG) {
                msg->id = CMD_TX_EXT_MESSAGE;
                msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
@@ -1325,7 +1669,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
        memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);
 
        if (cf->can_id & CAN_RTR_FLAG)
-               msg->u.tx_can.flags |= MSG_FLAG_REMOTE_FRAME;
+               *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
 
        for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
                if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
@@ -1334,6 +1678,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                }
        }
 
+       /* This should never happen; it implies a flow control bug */
        if (!context) {
                netdev_warn(netdev, "cannot find free context\n");
                ret =  NETDEV_TX_BUSY;
@@ -1364,9 +1709,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
        if (unlikely(err)) {
                can_free_echo_skb(netdev, context->echo_index);
 
-               skb = NULL; /* set to NULL to avoid double free in
-                            * dev_kfree_skb(skb) */
-
                atomic_dec(&priv->active_tx_urbs);
                usb_unanchor_urb(urb);
 
@@ -1388,8 +1730,6 @@ releasebuf:
        kfree(buf);
 nobufmem:
        usb_free_urb(urb);
-nourbmem:
-       dev_kfree_skb(skb);
        return ret;
 }
 
@@ -1502,6 +1842,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
        struct kvaser_usb_net_priv *priv;
        int i, err;
 
+       err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
+       if (err)
+               return err;
+
        netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
        if (!netdev) {
                dev_err(&intf->dev, "Cannot alloc candev\n");
@@ -1588,12 +1932,23 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 {
        struct kvaser_usb *dev;
        int err = -ENOMEM;
-       int i;
+       int i, retry = 3;
 
        dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
 
+       if (kvaser_is_leaf(id)) {
+               dev->family = KVASER_LEAF;
+       } else if (kvaser_is_usbcan(id)) {
+               dev->family = KVASER_USBCAN;
+       } else {
+               dev_err(&intf->dev,
+                       "Product ID (%d) does not belong to any known Kvaser USB family",
+                       id->idProduct);
+               return -ENODEV;
+       }
+
        err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
        if (err) {
                dev_err(&intf->dev, "Cannot get usb endpoint(s)");
@@ -1606,10 +1961,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
        usb_set_intfdata(intf, dev);
 
-       for (i = 0; i < MAX_NET_DEVICES; i++)
-               kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+       /* On some x86 laptops, plugging a Kvaser device again after
+        * an unplug makes the firmware always ignore the very first
+        * command. For such a case, provide some room for retries
+        * instead of completely exiting the driver.
+        */
+       do {
+               err = kvaser_usb_get_software_info(dev);
+       } while (--retry && err == -ETIMEDOUT);
 
-       err = kvaser_usb_get_software_info(dev);
        if (err) {
                dev_err(&intf->dev,
                        "Cannot get software infos, error %d\n", err);
index 1aefbc88d6437e151754103c025126033b9d51bb..1839e9ca62e713c7997ea09955995686bbd3da39 100644 (file)
@@ -1,2 +1,2 @@
 obj-$(CONFIG_CAN_PEAK_USB) += peak_usb.o
-peak_usb-y = pcan_usb_core.o pcan_usb.o pcan_usb_pro.o
+peak_usb-y = pcan_usb_core.o pcan_usb.o pcan_usb_pro.o pcan_usb_fd.o
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
new file mode 100644 (file)
index 0000000..1ba7c25
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * CAN driver for PEAK System micro-CAN based adapters
+ *
+ * Copyright (C) 2003-2011 PEAK System-Technik GmbH
+ * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef PUCAN_H
+#define PUCAN_H
+
+/* uCAN commands opcodes list (low-order 10 bits) */
+#define PUCAN_CMD_NOP                  0x000
+#define PUCAN_CMD_RESET_MODE           0x001
+#define PUCAN_CMD_NORMAL_MODE          0x002
+#define PUCAN_CMD_LISTEN_ONLY_MODE     0x003
+#define PUCAN_CMD_TIMING_SLOW          0x004
+#define PUCAN_CMD_TIMING_FAST          0x005
+#define PUCAN_CMD_FILTER_STD           0x008
+#define PUCAN_CMD_TX_ABORT             0x009
+#define PUCAN_CMD_WR_ERR_CNT           0x00a
+#define PUCAN_CMD_RX_FRAME_ENABLE      0x00b
+#define PUCAN_CMD_RX_FRAME_DISABLE     0x00c
+#define PUCAN_CMD_END_OF_COLLECTION    0x3ff
+
+/* uCAN received messages list */
+#define PUCAN_MSG_CAN_RX               0x0001
+#define PUCAN_MSG_ERROR                        0x0002
+#define PUCAN_MSG_STATUS               0x0003
+#define PUCAN_MSG_BUSLOAD              0x0004
+#define PUCAN_MSG_CAN_TX               0x1000
+
+/* uCAN command common header */
+struct __packed pucan_command {
+       __le16  opcode_channel;
+       u16     args[3];
+};
+
+/* uCAN TIMING_SLOW command fields */
+#define PUCAN_TSLOW_SJW_T(s, t)                (((s) & 0xf) | ((!!(t)) << 7))
+#define PUCAN_TSLOW_TSEG2(t)           ((t) & 0xf)
+#define PUCAN_TSLOW_TSEG1(t)           ((t) & 0x3f)
+#define PUCAN_TSLOW_BRP(b)             ((b) & 0x3ff)
+
+struct __packed pucan_timing_slow {
+       __le16  opcode_channel;
+
+       u8      ewl;            /* Error Warning limit */
+       u8      sjw_t;          /* Sync Jump Width + Triple sampling */
+       u8      tseg2;          /* Timing SEGment 2 */
+       u8      tseg1;          /* Timing SEGment 1 */
+
+       __le16  brp;            /* BaudRate Prescaler */
+};
+
+/* uCAN TIMING_FAST command fields */
+#define PUCAN_TFAST_SJW(s)             ((s) & 0x3)
+#define PUCAN_TFAST_TSEG2(t)           ((t) & 0x7)
+#define PUCAN_TFAST_TSEG1(t)           ((t) & 0xf)
+#define PUCAN_TFAST_BRP(b)             ((b) & 0x3ff)
+
+struct __packed pucan_timing_fast {
+       __le16  opcode_channel;
+
+       u8      unused;
+       u8      sjw;            /* Sync Jump Width */
+       u8      tseg2;          /* Timing SEGment 2 */
+       u8      tseg1;          /* Timing SEGment 1 */
+
+       __le16  brp;            /* BaudRate Prescaler */
+};
+
+/* uCAN FILTER_STD command fields */
+#define PUCAN_FLTSTD_ROW_IDX_BITS      6
+
+struct __packed pucan_filter_std {
+       __le16  opcode_channel;
+
+       __le16  idx;
+       __le32  mask;           /* CAN-ID bitmask in idx range */
+};
+
+/* uCAN WR_ERR_CNT command fields */
+#define PUCAN_WRERRCNT_TE              0x4000  /* Tx error cntr write Enable */
+#define PUCAN_WRERRCNT_RE              0x8000  /* Rx error cntr write Enable */
+
+struct __packed pucan_wr_err_cnt {
+       __le16  opcode_channel;
+
+       __le16  sel_mask;
+       u8      tx_counter;     /* Tx error counter new value */
+       u8      rx_counter;     /* Rx error counter new value */
+
+       u16     unused;
+};
+
+/* uCAN RX_FRAME_ENABLE command fields */
+#define PUCAN_FLTEXT_ERROR             0x0001
+#define PUCAN_FLTEXT_BUSLOAD           0x0002
+
+struct __packed pucan_filter_ext {
+       __le16  opcode_channel;
+
+       __le16  ext_mask;
+       u32     unused;
+};
+
+/* uCAN received messages global format */
+struct __packed pucan_msg {
+       __le16  size;
+       __le16  type;
+       __le32  ts_low;
+       __le32  ts_high;
+};
+
+/* uCAN flags for CAN/CANFD messages */
+#define PUCAN_MSG_SELF_RECEIVE         0x80
+#define PUCAN_MSG_ERROR_STATE_IND      0x40    /* error state indicator */
+#define PUCAN_MSG_BITRATE_SWITCH       0x20    /* bitrate switch */
+#define PUCAN_MSG_EXT_DATA_LEN         0x10    /* extended data length */
+#define PUCAN_MSG_SINGLE_SHOT          0x08
+#define PUCAN_MSG_LOOPED_BACK          0x04
+#define PUCAN_MSG_EXT_ID               0x02
+#define PUCAN_MSG_RTR                  0x01
+
+struct __packed pucan_rx_msg {
+       __le16  size;
+       __le16  type;
+       __le32  ts_low;
+       __le32  ts_high;
+       __le32  tag_low;
+       __le32  tag_high;
+       u8      channel_dlc;
+       u8      client;
+       __le16  flags;
+       __le32  can_id;
+       u8      d[0];
+};
+
+/* uCAN error types */
+#define PUCAN_ERMSG_BIT_ERROR          0
+#define PUCAN_ERMSG_FORM_ERROR         1
+#define PUCAN_ERMSG_STUFF_ERROR                2
+#define PUCAN_ERMSG_OTHER_ERROR                3
+#define PUCAN_ERMSG_ERR_CNT_DEC                4
+
+struct __packed pucan_error_msg {
+       __le16  size;
+       __le16  type;
+       __le32  ts_low;
+       __le32  ts_high;
+       u8      channel_type_d;
+       u8      code_g;
+       u8      tx_err_cnt;
+       u8      rx_err_cnt;
+};
+
+#define PUCAN_BUS_PASSIVE              0x20
+#define PUCAN_BUS_WARNING              0x40
+#define PUCAN_BUS_BUSOFF               0x80
+
+struct __packed pucan_status_msg {
+       __le16  size;
+       __le16  type;
+       __le32  ts_low;
+       __le32  ts_high;
+       u8      channel_p_w_b;
+       u8      unused[3];
+};
+
+/* uCAN transmitted message format */
+#define PUCAN_MSG_CHANNEL_DLC(c, d)    (((c) & 0xf) | ((d) << 4))
+
+struct __packed pucan_tx_msg {
+       __le16  size;
+       __le16  type;
+       __le32  tag_low;
+       __le32  tag_high;
+       u8      channel_dlc;
+       u8      client;
+       __le16  flags;
+       __le32  can_id;
+       u8      d[0];
+};
+
+/* build the cmd opcode_channel field with respect to the correct endianness */
+static inline __le16 pucan_cmd_opcode_channel(struct peak_usb_device *dev,
+                                             int opcode)
+{
+       return cpu_to_le16(((dev->ctrl_idx) << 12) | ((opcode) & 0x3ff));
+}
+
+/* return the channel number part from any received message channel_dlc field */
+static inline int pucan_msg_get_channel(struct pucan_rx_msg *rm)
+{
+       return rm->channel_dlc & 0xf;
+}
+
+/* return the dlc value from any received message channel_dlc field */
+static inline int pucan_msg_get_dlc(struct pucan_rx_msg *rm)
+{
+       return rm->channel_dlc >> 4;
+}
+
+static inline int pucan_ermsg_get_channel(struct pucan_error_msg *em)
+{
+       return em->channel_type_d & 0x0f;
+}
+
+static inline int pucan_stmsg_get_channel(struct pucan_status_msg *sm)
+{
+       return sm->channel_p_w_b & 0x0f;
+}
+
+#endif
index 4e1659d07979b77e231040509608b043dba4841b..72427f21edffaa9fed0874085e277aa4c83783b7 100644 (file)
@@ -488,6 +488,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
        switch (new_state) {
        case CAN_STATE_BUS_OFF:
                cf->can_id |= CAN_ERR_BUSOFF;
+               mc->pdev->dev.can.can_stats.bus_off++;
                can_bus_off(mc->netdev);
                break;
 
@@ -854,10 +855,11 @@ static int pcan_usb_probe(struct usb_interface *intf)
 /*
  * describe the PCAN-USB adapter
  */
-struct peak_usb_adapter pcan_usb = {
+const struct peak_usb_adapter pcan_usb = {
        .name = "PCAN-USB",
        .device_id = PCAN_USB_PRODUCT_ID,
        .ctrl_count = 1,
+       .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
        .clock = {
                .freq = PCAN_USB_CRYSTAL_HZ / 2 ,
        },
index c62f48a1161d1e871550489cbf3e5b4ccd90aa1e..7921cff93a63b107c0ecc5cbadf77b1c6ab4b367 100644 (file)
@@ -37,16 +37,19 @@ MODULE_LICENSE("GPL v2");
 static struct usb_device_id peak_usb_table[] = {
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)},
        {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
+       {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
+       {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
        {} /* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE(usb, peak_usb_table);
 
 /* List of supported PCAN-USB adapters (NULL terminated list) */
-static struct peak_usb_adapter *peak_usb_adapters_list[] = {
+static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
        &pcan_usb,
        &pcan_usb_pro,
-       NULL,
+       &pcan_usb_fd,
+       &pcan_usb_pro_fd,
 };
 
 /*
@@ -65,7 +68,7 @@ void pcan_dump_mem(char *prompt, void *p, int l)
  * initialize a time_ref object with usb adapter own settings
  */
 void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
-                           struct peak_usb_adapter *adapter)
+                           const struct peak_usb_adapter *adapter)
 {
        if (time_ref) {
                memset(time_ref, 0, sizeof(struct peak_time_ref));
@@ -164,6 +167,21 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
        }
 }
 
+/*
+ * post received skb after having set any hw timestamp
+ */
+int peak_usb_netif_rx(struct sk_buff *skb,
+                     struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high)
+{
+       struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+       struct timeval tv;
+
+       peak_usb_get_ts_tv(time_ref, ts_low, &tv);
+       hwts->hwtstamp = timeval_to_ktime(tv);
+
+       return netif_rx(skb);
+}
+
 /*
  * callback for bulk Rx urb
  */
@@ -253,7 +271,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
        case 0:
                /* transmission complete */
                netdev->stats.tx_packets++;
-               netdev->stats.tx_bytes += context->dlc;
+               netdev->stats.tx_bytes += context->data_len;
 
                /* prevent tx timeout */
                netdev->trans_start = jiffies;
@@ -289,7 +307,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
        struct peak_usb_device *dev = netdev_priv(netdev);
        struct peak_tx_urb_context *context = NULL;
        struct net_device_stats *stats = &netdev->stats;
-       struct can_frame *cf = (struct can_frame *)skb->data;
+       struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
        struct urb *urb;
        u8 *obuf;
        int i, err;
@@ -322,7 +340,9 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
        }
 
        context->echo_index = i;
-       context->dlc = cf->can_dlc;
+
+       /* Note: this works with CANFD frames too */
+       context->data_len = cfd->len;
 
        usb_anchor_urb(urb, &dev->tx_submitted);
 
@@ -679,19 +699,43 @@ static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode)
 }
 
 /*
- * candev callback used to set device bitrate.
+ * candev callback used to set device nominal/arbitration bitrate.
  */
 static int peak_usb_set_bittiming(struct net_device *netdev)
 {
        struct peak_usb_device *dev = netdev_priv(netdev);
-       struct can_bittiming *bt = &dev->can.bittiming;
+       const struct peak_usb_adapter *pa = dev->adapter;
 
-       if (dev->adapter->dev_set_bittiming) {
-               int err = dev->adapter->dev_set_bittiming(dev, bt);
+       if (pa->dev_set_bittiming) {
+               struct can_bittiming *bt = &dev->can.bittiming;
+               int err = pa->dev_set_bittiming(dev, bt);
 
                if (err)
                        netdev_info(netdev, "couldn't set bitrate (err %d)\n",
-                               err);
+                                   err);
+               return err;
+       }
+
+       return 0;
+}
+
+/*
+ * candev callback used to set device data bitrate.
+ */
+static int peak_usb_set_data_bittiming(struct net_device *netdev)
+{
+       struct peak_usb_device *dev = netdev_priv(netdev);
+       const struct peak_usb_adapter *pa = dev->adapter;
+
+       if (pa->dev_set_data_bittiming) {
+               struct can_bittiming *bt = &dev->can.data_bittiming;
+               int err = pa->dev_set_data_bittiming(dev, bt);
+
+               if (err)
+                       netdev_info(netdev,
+                                   "couldn't set data bitrate (err %d)\n",
+                                   err);
+
                return err;
        }
 
@@ -709,7 +753,7 @@ static const struct net_device_ops peak_usb_netdev_ops = {
  * create one device which is attached to CAN controller #ctrl_idx of the
  * usb adapter.
  */
-static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
+static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
                               struct usb_interface *intf, int ctrl_idx)
 {
        struct usb_device *usb_dev = interface_to_usbdev(intf);
@@ -750,9 +794,11 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
        dev->can.clock = peak_usb_adapter->clock;
        dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
        dev->can.do_set_bittiming = peak_usb_set_bittiming;
+       dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
+       dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
        dev->can.do_set_mode = peak_usb_set_mode;
-       dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
-                                     CAN_CTRLMODE_LISTENONLY;
+       dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
+       dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported;
 
        netdev->netdev_ops = &peak_usb_netdev_ops;
 
@@ -857,17 +903,18 @@ static int peak_usb_probe(struct usb_interface *intf,
 {
        struct usb_device *usb_dev = interface_to_usbdev(intf);
        const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct);
-       struct peak_usb_adapter *peak_usb_adapter, **pp;
+       const struct peak_usb_adapter *peak_usb_adapter = NULL;
        int i, err = -ENOMEM;
 
        usb_dev = interface_to_usbdev(intf);
 
        /* get corresponding PCAN-USB adapter */
-       for (pp = peak_usb_adapters_list; *pp; pp++)
-               if ((*pp)->device_id == usb_id_product)
+       for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++)
+               if (peak_usb_adapters_list[i]->device_id == usb_id_product) {
+                       peak_usb_adapter = peak_usb_adapters_list[i];
                        break;
+               }
 
-       peak_usb_adapter = *pp;
        if (!peak_usb_adapter) {
                /* should never come except device_id bad usage in this file */
                pr_err("%s: didn't find device id. 0x%x in devices list\n",
index 073b47ff8eee3065b2bf9f1319fc7207c93c2aa8..9e624f05ad4d99d377c3152138b20b82983c4e8f 100644 (file)
@@ -25,6 +25,8 @@
 /* supported device ids. */
 #define PCAN_USB_PRODUCT_ID            0x000c
 #define PCAN_USBPRO_PRODUCT_ID         0x000d
+#define PCAN_USBPROFD_PRODUCT_ID       0x0011
+#define PCAN_USBFD_PRODUCT_ID          0x0012
 
 #define PCAN_USB_DRIVER_NAME           "peak_usb"
 
@@ -44,8 +46,10 @@ struct peak_usb_device;
 struct peak_usb_adapter {
        char *name;
        u32 device_id;
+       u32 ctrlmode_supported;
        struct can_clock clock;
        const struct can_bittiming_const bittiming_const;
+       const struct can_bittiming_const data_bittiming_const;
        unsigned int ctrl_count;
 
        int (*intf_probe)(struct usb_interface *intf);
@@ -57,6 +61,8 @@ struct peak_usb_adapter {
        int (*dev_close)(struct peak_usb_device *dev);
        int (*dev_set_bittiming)(struct peak_usb_device *dev,
                                        struct can_bittiming *bt);
+       int (*dev_set_data_bittiming)(struct peak_usb_device *dev,
+                                     struct can_bittiming *bt);
        int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff);
        int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id);
        int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb);
@@ -66,6 +72,8 @@ struct peak_usb_adapter {
        int (*dev_stop)(struct peak_usb_device *dev);
        int (*dev_restart_async)(struct peak_usb_device *dev, struct urb *urb,
                                        u8 *buf);
+       int (*do_get_berr_counter)(const struct net_device *netdev,
+                                  struct can_berr_counter *bec);
        u8 ep_msg_in;
        u8 ep_msg_out[PCAN_USB_MAX_CHANNEL];
        u8 ts_used_bits;
@@ -78,21 +86,23 @@ struct peak_usb_adapter {
        int sizeof_dev_private;
 };
 
-extern struct peak_usb_adapter pcan_usb;
-extern struct peak_usb_adapter pcan_usb_pro;
+extern const struct peak_usb_adapter pcan_usb;
+extern const struct peak_usb_adapter pcan_usb_pro;
+extern const struct peak_usb_adapter pcan_usb_fd;
+extern const struct peak_usb_adapter pcan_usb_pro_fd;
 
 struct peak_time_ref {
        struct timeval tv_host_0, tv_host;
        u32 ts_dev_1, ts_dev_2;
        u64 ts_total;
        u32 tick_count;
-       struct peak_usb_adapter *adapter;
+       const struct peak_usb_adapter *adapter;
 };
 
 struct peak_tx_urb_context {
        struct peak_usb_device *dev;
        u32 echo_index;
-       u8 dlc;
+       u8 data_len;
        struct urb *urb;
 };
 
@@ -102,7 +112,7 @@ struct peak_tx_urb_context {
 /* PEAK-System USB device */
 struct peak_usb_device {
        struct can_priv can;
-       struct peak_usb_adapter *adapter;
+       const struct peak_usb_adapter *adapter;
        unsigned int ctrl_idx;
        u32 state;
 
@@ -134,12 +144,14 @@ void pcan_dump_mem(char *prompt, void *p, int l);
 
 /* common timestamp management */
 void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
-                           struct peak_usb_adapter *adapter);
+                           const struct peak_usb_adapter *adapter);
 void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
 void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
 void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
                        struct timeval *tv);
-
+int peak_usb_netif_rx(struct sk_buff *skb,
+                     struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high);
 void peak_usb_async_complete(struct urb *urb);
 void peak_usb_restart_complete(struct peak_usb_device *dev);
+
 #endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
new file mode 100644 (file)
index 0000000..962c3f0
--- /dev/null
@@ -0,0 +1,1095 @@
+/*
+ * CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter
+ *
+ * Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "pcan_usb_core.h"
+#include "pcan_usb_pro.h"
+#include "pcan_ucan.h"
+
+MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter");
+MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter");
+
+#define PCAN_USBPROFD_CHANNEL_COUNT    2
+#define PCAN_USBFD_CHANNEL_COUNT       1
+
+/* PCAN-USB Pro FD adapter internal clock (Hz) */
+#define PCAN_UFD_CRYSTAL_HZ            80000000
+
+#define PCAN_UFD_CMD_BUFFER_SIZE       512
+#define PCAN_UFD_LOSPD_PKT_SIZE                64
+
+/* PCAN-USB Pro FD command timeout (ms.) */
+#define PCAN_UFD_CMD_TIMEOUT_MS                1000
+
+/* PCAN-USB Pro FD rx/tx buffers size */
+#define PCAN_UFD_RX_BUFFER_SIZE                2048
+#define PCAN_UFD_TX_BUFFER_SIZE                512
+
+/* read some versions info from the hw devcie */
+struct __packed pcan_ufd_fw_info {
+       __le16  size_of;        /* sizeof this */
+       __le16  type;           /* type of this structure */
+       u8      hw_type;        /* Type of hardware (HW_TYPE_xxx) */
+       u8      bl_version[3];  /* Bootloader version */
+       u8      hw_version;     /* Hardware version (PCB) */
+       u8      fw_version[3];  /* Firmware version */
+       __le32  dev_id[2];      /* "device id" per CAN */
+       __le32  ser_no;         /* S/N */
+       __le32  flags;          /* special functions */
+};
+
+/* handle device specific info used by the netdevices */
+struct pcan_usb_fd_if {
+       struct peak_usb_device  *dev[PCAN_USB_MAX_CHANNEL];
+       struct pcan_ufd_fw_info fw_info;
+       struct peak_time_ref    time_ref;
+       int                     cm_ignore_count;
+       int                     dev_opened_count;
+};
+
+/* device information */
+struct pcan_usb_fd_device {
+       struct peak_usb_device  dev;
+       struct can_berr_counter bec;
+       struct pcan_usb_fd_if   *usb_if;
+       u8                      *cmd_buffer_addr;
+};
+
+/* Extended USB commands (non uCAN commands) */
+
+/* Clock Modes command */
+#define PCAN_UFD_CMD_CLK_SET           0x80
+
+#define PCAN_UFD_CLK_80MHZ             0x0
+#define PCAN_UFD_CLK_60MHZ             0x1
+#define PCAN_UFD_CLK_40MHZ             0x2
+#define PCAN_UFD_CLK_30MHZ             0x3
+#define PCAN_UFD_CLK_24MHZ             0x4
+#define PCAN_UFD_CLK_20MHZ             0x5
+#define PCAN_UFD_CLK_DEF               PCAN_UFD_CLK_80MHZ
+
+struct __packed pcan_ufd_clock {
+       __le16  opcode_channel;
+
+       u8      mode;
+       u8      unused[5];
+};
+
+/* LED control command */
+#define PCAN_UFD_CMD_LED_SET           0x86
+
+#define PCAN_UFD_LED_DEV               0x00
+#define PCAN_UFD_LED_FAST              0x01
+#define PCAN_UFD_LED_SLOW              0x02
+#define PCAN_UFD_LED_ON                        0x03
+#define PCAN_UFD_LED_OFF               0x04
+#define PCAN_UFD_LED_DEF               PCAN_UFD_LED_DEV
+
+struct __packed pcan_ufd_led {
+       __le16  opcode_channel;
+
+       u8      mode;
+       u8      unused[5];
+};
+
+/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
+#define PCAN_UFD_FLTEXT_CALIBRATION    0x8000
+
+struct __packed pcan_ufd_filter_ext {
+       __le16  opcode_channel;
+
+       __le16  ext_mask;
+       u16     unused;
+       __le16  usb_mask;
+};
+
+/* Extended usage of uCAN messages for PCAN-USB Pro FD */
+#define PCAN_UFD_MSG_CALIBRATION       0x100
+
+struct __packed pcan_ufd_ts_msg {
+       __le16  size;
+       __le16  type;
+       __le32  ts_low;
+       __le32  ts_high;
+       __le16  usb_frame_index;
+       u16     unused;
+};
+
+#define PCAN_UFD_MSG_OVERRUN           0x101
+
+#define PCAN_UFD_OVMSG_CHANNEL(o)      ((o)->channel & 0xf)
+
+struct __packed pcan_ufd_ovr_msg {
+       __le16  size;
+       __le16  type;
+       __le32  ts_low;
+       __le32  ts_high;
+       u8      channel;
+       u8      unused[3];
+};
+
+static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om)
+{
+       return om->channel & 0xf;
+}
+
+/* Clock mode frequency values */
+static const u32 pcan_usb_fd_clk_freq[6] = {
+       [PCAN_UFD_CLK_80MHZ] = 80000000,
+       [PCAN_UFD_CLK_60MHZ] = 60000000,
+       [PCAN_UFD_CLK_40MHZ] = 40000000,
+       [PCAN_UFD_CLK_30MHZ] = 30000000,
+       [PCAN_UFD_CLK_24MHZ] = 24000000,
+       [PCAN_UFD_CLK_20MHZ] = 20000000
+};
+
+/* return a device USB interface */
+static inline
+struct pcan_usb_fd_if *pcan_usb_fd_dev_if(struct peak_usb_device *dev)
+{
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+       return pdev->usb_if;
+}
+
+/* return a device USB commands buffer */
+static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
+{
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+       return pdev->cmd_buffer_addr;
+}
+
+/* send PCAN-USB Pro FD commands synchronously */
+static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
+{
+       void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
+       int err;
+       u8 *packet_ptr;
+       int i, n = 1, packet_len;
+       ptrdiff_t cmd_len;
+
+       /* usb device unregistered? */
+       if (!(dev->state & PCAN_USB_STATE_CONNECTED))
+               return 0;
+
+       /* if a packet is not filled completely by commands, the command list
+        * is terminated with an "end of collection" record.
+        */
+       cmd_len = cmd_tail - cmd_head;
+       if (cmd_len <= (PCAN_UFD_CMD_BUFFER_SIZE - sizeof(u64))) {
+               memset(cmd_tail, 0xff, sizeof(u64));
+               cmd_len += sizeof(u64);
+       }
+
+       packet_ptr = cmd_head;
+
+       /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
+       if ((dev->udev->speed != USB_SPEED_HIGH) &&
+           (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
+               packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
+               n += cmd_len / packet_len;
+       } else {
+               packet_len = cmd_len;
+       }
+
+       for (i = 0; i < n; i++) {
+               err = usb_bulk_msg(dev->udev,
+                                  usb_sndbulkpipe(dev->udev,
+                                                  PCAN_USBPRO_EP_CMDOUT),
+                                  packet_ptr, packet_len,
+                                  NULL, PCAN_UFD_CMD_TIMEOUT_MS);
+               if (err) {
+                       netdev_err(dev->netdev,
+                                  "sending command failure: %d\n", err);
+                       break;
+               }
+
+               packet_ptr += packet_len;
+       }
+
+       return err;
+}
+
+/* build the commands list in the given buffer, to enter operational mode */
+static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
+{
+       struct pucan_wr_err_cnt *prc;
+       struct pucan_command *cmd;
+       u8 *pc = buf;
+
+       /* 1st, reset error counters: */
+       prc = (struct pucan_wr_err_cnt *)pc;
+       prc->opcode_channel = pucan_cmd_opcode_channel(dev,
+                                                      PUCAN_CMD_WR_ERR_CNT);
+
+       /* select both counters */
+       prc->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE|PUCAN_WRERRCNT_RE);
+
+       /* and reset their values */
+       prc->tx_counter = 0;
+       prc->rx_counter = 0;
+
+       /* moves the pointer forward */
+       pc += sizeof(struct pucan_wr_err_cnt);
+
+       /* next, go back to operational mode */
+       cmd = (struct pucan_command *)pc;
+       cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+                               (dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ?
+                                               PUCAN_CMD_LISTEN_ONLY_MODE :
+                                               PUCAN_CMD_NORMAL_MODE);
+       pc += sizeof(struct pucan_command);
+
+       return pc - buf;
+}
+
+/* set CAN bus on/off */
+static int pcan_usb_fd_set_bus(struct peak_usb_device *dev, u8 onoff)
+{
+       u8 *pc = pcan_usb_fd_cmd_buffer(dev);
+       int l;
+
+       if (onoff) {
+               /* build the cmds list to enter operational mode */
+               l = pcan_usb_fd_build_restart_cmd(dev, pc);
+       } else {
+               struct pucan_command *cmd = (struct pucan_command *)pc;
+
+               /* build cmd to go back to reset mode */
+               cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+                                                       PUCAN_CMD_RESET_MODE);
+               l = sizeof(struct pucan_command);
+       }
+
+       /* send the command */
+       return pcan_usb_fd_send_cmd(dev, pc + l);
+}
+
+/* set filtering masks:
+ *
+ *     idx  in range [0..63] selects a row #idx, all rows otherwise
+ *     mask in range [0..0xffffffff] defines up to 32 CANIDs in the row(s)
+ *
+ *     Each bit of this 64 x 32 bits array defines a CANID value:
+ *
+ *     bit[i,j] = 1 implies that CANID=(i x 32)+j will be received, while
+ *     bit[i,j] = 0 implies that CANID=(i x 32)+j will be discarded.
+ */
+static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
+                                     u32 mask)
+{
+       struct pucan_filter_std *cmd = pcan_usb_fd_cmd_buffer(dev);
+       int i, n;
+
+       /* select all rows when idx is out of range [0..63] */
+       if ((idx < 0) || (idx >= (1 << PUCAN_FLTSTD_ROW_IDX_BITS))) {
+               n = 1 << PUCAN_FLTSTD_ROW_IDX_BITS;
+               idx = 0;
+
+       /* select the row (and only the row) otherwise */
+       } else {
+               n = idx + 1;
+       }
+
+       for (i = idx; i < n; i++, cmd++) {
+               cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+                                                       PUCAN_CMD_FILTER_STD);
+               cmd->idx = cpu_to_le16(i);
+               cmd->mask = cpu_to_le32(mask);
+       }
+
+       /* send the command */
+       return pcan_usb_fd_send_cmd(dev, cmd);
+}
+
+/* set/unset notifications filter:
+ *
+ *     onoff   sets(1)/unset(0) notifications
+ *     mask    each bit defines a kind of notification to set/unset
+ */
+static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
+                                     bool onoff, u16 ext_mask, u16 usb_mask)
+{
+       struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+       cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+                                       (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
+                                                 PUCAN_CMD_RX_FRAME_DISABLE);
+
+       cmd->ext_mask = cpu_to_le16(ext_mask);
+       cmd->usb_mask = cpu_to_le16(usb_mask);
+
+       /* send the command */
+       return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* setup LED control */
+static int pcan_usb_fd_set_can_led(struct peak_usb_device *dev, u8 led_mode)
+{
+       struct pcan_ufd_led *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+       cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+                                                      PCAN_UFD_CMD_LED_SET);
+       cmd->mode = led_mode;
+
+       /* send the command */
+       return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* set CAN clock domain */
+static int pcan_usb_fd_set_clock_domain(struct peak_usb_device *dev,
+                                       u8 clk_mode)
+{
+       struct pcan_ufd_clock *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+       cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+                                                      PCAN_UFD_CMD_CLK_SET);
+       cmd->mode = clk_mode;
+
+       /* send the command */
+       return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* set bittiming for CAN and CAN-FD header */
+static int pcan_usb_fd_set_bittiming_slow(struct peak_usb_device *dev,
+                                         struct can_bittiming *bt)
+{
+       struct pucan_timing_slow *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+       cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+                                                      PUCAN_CMD_TIMING_SLOW);
+       cmd->sjw_t = PUCAN_TSLOW_SJW_T(bt->sjw - 1,
+                               dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES);
+
+       cmd->tseg2 = PUCAN_TSLOW_TSEG2(bt->phase_seg2 - 1);
+       cmd->tseg1 = PUCAN_TSLOW_TSEG1(bt->prop_seg + bt->phase_seg1 - 1);
+       cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(bt->brp - 1));
+
+       cmd->ewl = 96;  /* default */
+
+       /* send the command */
+       return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* set CAN-FD bittiming for data */
+static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
+                                         struct can_bittiming *bt)
+{
+       struct pucan_timing_fast *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+       cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+                                                      PUCAN_CMD_TIMING_FAST);
+       cmd->sjw = PUCAN_TFAST_SJW(bt->sjw - 1);
+       cmd->tseg2 = PUCAN_TFAST_TSEG2(bt->phase_seg2 - 1);
+       cmd->tseg1 = PUCAN_TFAST_TSEG1(bt->prop_seg + bt->phase_seg1 - 1);
+       cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(bt->brp - 1));
+
+       /* send the command */
+       return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
+/* handle restart but in asynchronously way
+ * (uses PCAN-USB Pro code to complete asynchronous request)
+ */
+static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
+                                    struct urb *urb, u8 *buf)
+{
+       u8 *pc = buf;
+
+       /* build the entire cmds list in the provided buffer, to go back into
+        * operational mode.
+        */
+       pc += pcan_usb_fd_build_restart_cmd(dev, pc);
+
+       /* add EOC */
+       memset(pc, 0xff, sizeof(struct pucan_command));
+       pc += sizeof(struct pucan_command);
+
+       /* complete the URB */
+       usb_fill_bulk_urb(urb, dev->udev,
+                         usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT),
+                         buf, pc - buf,
+                         pcan_usb_pro_restart_complete, dev);
+
+       /* and submit it. */
+       return usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static int pcan_usb_fd_drv_loaded(struct peak_usb_device *dev, bool loaded)
+{
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+
+       pdev->cmd_buffer_addr[0] = 0;
+       pdev->cmd_buffer_addr[1] = !!loaded;
+
+       return pcan_usb_pro_send_req(dev,
+                               PCAN_USBPRO_REQ_FCT,
+                               PCAN_USBPRO_FCT_DRVLD,
+                               pdev->cmd_buffer_addr,
+                               PCAN_USBPRO_FCT_DRVLD_REQ_LEN);
+}
+
+static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
+                                    struct pucan_msg *rx_msg)
+{
+       struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
+       struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
+       struct net_device *netdev = dev->netdev;
+       struct canfd_frame *cfd;
+       struct sk_buff *skb;
+       const u16 rx_msg_flags = le16_to_cpu(rm->flags);
+
+       if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
+               /* CANFD frame case */
+               skb = alloc_canfd_skb(netdev, &cfd);
+               if (!skb)
+                       return -ENOMEM;
+
+               if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
+                       cfd->flags |= CANFD_BRS;
+
+               if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
+                       cfd->flags |= CANFD_ESI;
+
+               cfd->len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(rm)));
+       } else {
+               /* CAN 2.0 frame case */
+               skb = alloc_can_skb(netdev, (struct can_frame **)&cfd);
+               if (!skb)
+                       return -ENOMEM;
+
+               cfd->len = get_can_dlc(pucan_msg_get_dlc(rm));
+       }
+
+       cfd->can_id = le32_to_cpu(rm->can_id);
+
+       if (rx_msg_flags & PUCAN_MSG_EXT_ID)
+               cfd->can_id |= CAN_EFF_FLAG;
+
+       if (rx_msg_flags & PUCAN_MSG_RTR)
+               cfd->can_id |= CAN_RTR_FLAG;
+       else
+               memcpy(cfd->data, rm->d, cfd->len);
+
+       peak_usb_netif_rx(skb, &usb_if->time_ref,
+                         le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high));
+
+       netdev->stats.rx_packets++;
+       netdev->stats.rx_bytes += cfd->len;
+
+       return 0;
+}
+
+/* handle uCAN status message */
+static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
+                                    struct pucan_msg *rx_msg)
+{
+       struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
+       struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+       enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
+       enum can_state rx_state, tx_state;
+       struct net_device *netdev = dev->netdev;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+
+       /* nothing should be sent while in BUS_OFF state */
+       if (dev->can.state == CAN_STATE_BUS_OFF)
+               return 0;
+
+       if (sm->channel_p_w_b & PUCAN_BUS_BUSOFF) {
+               new_state = CAN_STATE_BUS_OFF;
+       } else if (sm->channel_p_w_b & PUCAN_BUS_PASSIVE) {
+               new_state = CAN_STATE_ERROR_PASSIVE;
+       } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
+               new_state = CAN_STATE_ERROR_WARNING;
+       } else {
+               /* no error bit (so, no error skb, back to active state) */
+               dev->can.state = CAN_STATE_ERROR_ACTIVE;
+               pdev->bec.txerr = 0;
+               pdev->bec.rxerr = 0;
+               return 0;
+       }
+
+       /* state hasn't changed */
+       if (new_state == dev->can.state)
+               return 0;
+
+       /* handle bus state change */
+       tx_state = (pdev->bec.txerr >= pdev->bec.rxerr) ? new_state : 0;
+       rx_state = (pdev->bec.txerr <= pdev->bec.rxerr) ? new_state : 0;
+
+       /* allocate an skb to store the error frame */
+       skb = alloc_can_err_skb(netdev, &cf);
+       if (skb)
+               can_change_state(netdev, cf, tx_state, rx_state);
+
+       /* things must be done even in case of OOM */
+       if (new_state == CAN_STATE_BUS_OFF)
+               can_bus_off(netdev);
+
+       if (!skb)
+               return -ENOMEM;
+
+       peak_usb_netif_rx(skb, &usb_if->time_ref,
+                         le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high));
+
+       netdev->stats.rx_packets++;
+       netdev->stats.rx_bytes += cf->can_dlc;
+
+       return 0;
+}
+
+/* handle uCAN error message */
+static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
+                                   struct pucan_msg *rx_msg)
+{
+       struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
+       struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+
+       /* keep a trace of tx and rx error counters for later use */
+       pdev->bec.txerr = er->tx_err_cnt;
+       pdev->bec.rxerr = er->rx_err_cnt;
+
+       return 0;
+}
+
+/* handle uCAN overrun message */
+static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
+                                     struct pucan_msg *rx_msg)
+{
+       struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
+       struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
+       struct net_device *netdev = dev->netdev;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+
+       /* allocate an skb to store the error frame */
+       skb = alloc_can_err_skb(netdev, &cf);
+       if (!skb)
+               return -ENOMEM;
+
+       cf->can_id |= CAN_ERR_CRTL;
+       cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+
+       peak_usb_netif_rx(skb, &usb_if->time_ref,
+                         le32_to_cpu(ov->ts_low), le32_to_cpu(ov->ts_high));
+
+       netdev->stats.rx_over_errors++;
+       netdev->stats.rx_errors++;
+
+       return 0;
+}
+
+/* handle USB calibration message */
+static void pcan_usb_fd_decode_ts(struct pcan_usb_fd_if *usb_if,
+                                 struct pucan_msg *rx_msg)
+{
+       struct pcan_ufd_ts_msg *ts = (struct pcan_ufd_ts_msg *)rx_msg;
+
+       /* should wait until clock is stabilized */
+       if (usb_if->cm_ignore_count > 0)
+               usb_if->cm_ignore_count--;
+       else
+               peak_usb_set_ts_now(&usb_if->time_ref, le32_to_cpu(ts->ts_low));
+}
+
+/* callback for bulk IN urb */
+static int pcan_usb_fd_decode_buf(struct peak_usb_device *dev, struct urb *urb)
+{
+       struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev);
+       struct net_device *netdev = dev->netdev;
+       struct pucan_msg *rx_msg;
+       u8 *msg_ptr, *msg_end;
+       int err = 0;
+
+       /* loop reading all the records from the incoming message */
+       msg_ptr = urb->transfer_buffer;
+       msg_end = urb->transfer_buffer + urb->actual_length;
+       for (; msg_ptr < msg_end;) {
+               u16 rx_msg_type, rx_msg_size;
+
+               rx_msg = (struct pucan_msg *)msg_ptr;
+               if (!rx_msg->size) {
+                       /* null packet found: end of list */
+                       break;
+               }
+
+               rx_msg_size = le16_to_cpu(rx_msg->size);
+               rx_msg_type = le16_to_cpu(rx_msg->type);
+
+               /* check if the record goes out of current packet */
+               if (msg_ptr + rx_msg_size > msg_end) {
+                       netdev_err(netdev,
+                                  "got frag rec: should inc usb rx buf sze\n");
+                       err = -EBADMSG;
+                       break;
+               }
+
+               switch (rx_msg_type) {
+               case PUCAN_MSG_CAN_RX:
+                       err = pcan_usb_fd_decode_canmsg(usb_if, rx_msg);
+                       if (err < 0)
+                               goto fail;
+                       break;
+
+               case PCAN_UFD_MSG_CALIBRATION:
+                       pcan_usb_fd_decode_ts(usb_if, rx_msg);
+                       break;
+
+               case PUCAN_MSG_ERROR:
+                       err = pcan_usb_fd_decode_error(usb_if, rx_msg);
+                       if (err < 0)
+                               goto fail;
+                       break;
+
+               case PUCAN_MSG_STATUS:
+                       err = pcan_usb_fd_decode_status(usb_if, rx_msg);
+                       if (err < 0)
+                               goto fail;
+                       break;
+
+               case PCAN_UFD_MSG_OVERRUN:
+                       err = pcan_usb_fd_decode_overrun(usb_if, rx_msg);
+                       if (err < 0)
+                               goto fail;
+                       break;
+
+               default:
+                       netdev_err(netdev,
+                                  "unhandled msg type 0x%02x (%d): ignored\n",
+                                  rx_msg_type, rx_msg_type);
+                       break;
+               }
+
+               msg_ptr += rx_msg_size;
+       }
+
+fail:
+       if (err)
+               pcan_dump_mem("received msg",
+                             urb->transfer_buffer, urb->actual_length);
+       return err;
+}
+
+/* CAN/CANFD frames encoding callback */
+static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
+                                 struct sk_buff *skb, u8 *obuf, size_t *size)
+{
+       struct pucan_tx_msg *tx_msg = (struct pucan_tx_msg *)obuf;
+       struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+       u16 tx_msg_size, tx_msg_flags;
+       u8 can_dlc;
+
+       tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
+       tx_msg->size = cpu_to_le16(tx_msg_size);
+       tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
+
+       tx_msg_flags = 0;
+       if (cfd->can_id & CAN_EFF_FLAG) {
+               tx_msg_flags |= PUCAN_MSG_EXT_ID;
+               tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_EFF_MASK);
+       } else {
+               tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_SFF_MASK);
+       }
+
+       if (can_is_canfd_skb(skb)) {
+               /* considering a CANFD frame */
+               can_dlc = can_len2dlc(cfd->len);
+
+               tx_msg_flags |= PUCAN_MSG_EXT_DATA_LEN;
+
+               if (cfd->flags & CANFD_BRS)
+                       tx_msg_flags |= PUCAN_MSG_BITRATE_SWITCH;
+
+               if (cfd->flags & CANFD_ESI)
+                       tx_msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
+       } else {
+               /* CAND 2.0 frames */
+               can_dlc = cfd->len;
+
+               if (cfd->can_id & CAN_RTR_FLAG)
+                       tx_msg_flags |= PUCAN_MSG_RTR;
+       }
+
+       tx_msg->flags = cpu_to_le16(tx_msg_flags);
+       tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, can_dlc);
+       memcpy(tx_msg->d, cfd->data, cfd->len);
+
+       /* add null size message to tag the end (messages are 32-bits aligned)
+        */
+       tx_msg = (struct pucan_tx_msg *)(obuf + tx_msg_size);
+
+       tx_msg->size = 0;
+
+       /* set the whole size of the USB packet to send */
+       *size = tx_msg_size + sizeof(u32);
+
+       return 0;
+}
+
+/* start the interface (last chance before set bus on) */
+static int pcan_usb_fd_start(struct peak_usb_device *dev)
+{
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+       int err;
+
+       /* set filter mode: all acceptance */
+       err = pcan_usb_fd_set_filter_std(dev, -1, 0xffffffff);
+       if (err)
+               return err;
+
+       /* opening first device: */
+       if (pdev->usb_if->dev_opened_count == 0) {
+               /* reset time_ref */
+               peak_usb_init_time_ref(&pdev->usb_if->time_ref,
+                                      &pcan_usb_pro_fd);
+
+               /* enable USB calibration messages */
+               err = pcan_usb_fd_set_filter_ext(dev, 1,
+                                                PUCAN_FLTEXT_ERROR,
+                                                PCAN_UFD_FLTEXT_CALIBRATION);
+       }
+
+       pdev->usb_if->dev_opened_count++;
+
+       /* reset cached error counters */
+       pdev->bec.txerr = 0;
+       pdev->bec.rxerr = 0;
+
+       return err;
+}
+
+/* socket callback used to copy berr counters values receieved through USB */
+static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
+                                       struct can_berr_counter *bec)
+{
+       struct peak_usb_device *dev = netdev_priv(netdev);
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+
+       *bec = pdev->bec;
+
+       /* must return 0 */
+       return 0;
+}
+
+/* stop interface (last chance before set bus off) */
+static int pcan_usb_fd_stop(struct peak_usb_device *dev)
+{
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+
+       /* turn off special msgs for that interface if no other dev opened */
+       if (pdev->usb_if->dev_opened_count == 1)
+               pcan_usb_fd_set_filter_ext(dev, 0,
+                                          PUCAN_FLTEXT_ERROR,
+                                          PCAN_UFD_FLTEXT_CALIBRATION);
+       pdev->usb_if->dev_opened_count--;
+
+       return 0;
+}
+
+/* called when probing, to initialize a device object */
+static int pcan_usb_fd_init(struct peak_usb_device *dev)
+{
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+       int i, err = -ENOMEM;
+
+       /* do this for 1st channel only */
+       if (!dev->prev_siblings) {
+               /* allocate netdevices common structure attached to first one */
+               pdev->usb_if = kzalloc(sizeof(*pdev->usb_if), GFP_KERNEL);
+               if (!pdev->usb_if)
+                       goto err_out;
+
+               /* allocate command buffer once for all for the interface */
+               pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+                                               GFP_KERNEL);
+               if (!pdev->cmd_buffer_addr)
+                       goto err_out_1;
+
+               /* number of ts msgs to ignore before taking one into account */
+               pdev->usb_if->cm_ignore_count = 5;
+
+               err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
+                                           PCAN_USBPRO_INFO_FW,
+                                           &pdev->usb_if->fw_info,
+                                           sizeof(pdev->usb_if->fw_info));
+               if (err) {
+                       dev_err(dev->netdev->dev.parent,
+                               "unable to read %s firmware info (err %d)\n",
+                               dev->adapter->name, err);
+                       goto err_out_2;
+               }
+
+               /* explicit use of dev_xxx() instead of netdev_xxx() here:
+                * information displayed are related to the device itself, not
+                * to the canx (channel) device.
+                */
+               dev_info(dev->netdev->dev.parent,
+                        "PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n",
+                        dev->adapter->name, pdev->usb_if->fw_info.hw_version,
+                        pdev->usb_if->fw_info.fw_version[0],
+                        pdev->usb_if->fw_info.fw_version[1],
+                        pdev->usb_if->fw_info.fw_version[2],
+                        dev->adapter->ctrl_count);
+
+               /* the currently supported hw is non-ISO */
+               dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+
+               /* tell the hardware the can driver is running */
+               err = pcan_usb_fd_drv_loaded(dev, 1);
+               if (err) {
+                       dev_err(dev->netdev->dev.parent,
+                               "unable to tell %s driver is loaded (err %d)\n",
+                               dev->adapter->name, err);
+                       goto err_out_2;
+               }
+       } else {
+               /* otherwise, simply copy previous sibling's values */
+               struct pcan_usb_fd_device *ppdev =
+                       container_of(dev->prev_siblings,
+                                    struct pcan_usb_fd_device, dev);
+
+               pdev->usb_if = ppdev->usb_if;
+               pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr;
+       }
+
+       pdev->usb_if->dev[dev->ctrl_idx] = dev;
+       dev->device_number =
+               le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
+
+       /* set clock domain */
+       for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++)
+               if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i])
+                       break;
+
+       if (i >= ARRAY_SIZE(pcan_usb_fd_clk_freq)) {
+               dev_warn(dev->netdev->dev.parent,
+                        "incompatible clock frequencies\n");
+               err = -EINVAL;
+               goto err_out_2;
+       }
+
+       pcan_usb_fd_set_clock_domain(dev, i);
+
+       /* set LED in default state (end of init phase) */
+       pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF);
+
+       return 0;
+
+err_out_2:
+       kfree(pdev->cmd_buffer_addr);
+err_out_1:
+       kfree(pdev->usb_if);
+err_out:
+       return err;
+}
+
+/* called when driver module is being unloaded */
+static void pcan_usb_fd_exit(struct peak_usb_device *dev)
+{
+       struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+
+       /* when rmmod called before unplug and if down, should reset things
+        * before leaving
+        */
+       if (dev->can.state != CAN_STATE_STOPPED) {
+               /* set bus off on the corresponding channel */
+               pcan_usb_fd_set_bus(dev, 0);
+       }
+
+       /* switch off corresponding CAN LEDs */
+       pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_OFF);
+
+       /* if channel #0 (only) */
+       if (dev->ctrl_idx == 0) {
+               /* turn off calibration message if any device were opened */
+               if (pdev->usb_if->dev_opened_count > 0)
+                       pcan_usb_fd_set_filter_ext(dev, 0,
+                                                  PUCAN_FLTEXT_ERROR,
+                                                  PCAN_UFD_FLTEXT_CALIBRATION);
+
+               /* tell USB adapter that the driver is being unloaded */
+               pcan_usb_fd_drv_loaded(dev, 0);
+       }
+}
+
+/* called when the USB adapter is unplugged */
+static void pcan_usb_fd_free(struct peak_usb_device *dev)
+{
+       /* last device: can free shared objects now */
+       if (!dev->prev_siblings && !dev->next_siblings) {
+               struct pcan_usb_fd_device *pdev =
+                       container_of(dev, struct pcan_usb_fd_device, dev);
+
+               /* free commands buffer */
+               kfree(pdev->cmd_buffer_addr);
+
+               /* free usb interface object */
+               kfree(pdev->usb_if);
+       }
+}
+
+/* describes the PCAN-USB FD adapter */
+const struct peak_usb_adapter pcan_usb_fd = {
+       .name = "PCAN-USB FD",
+       .device_id = PCAN_USBFD_PRODUCT_ID,
+       .ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
+       .ctrlmode_supported = CAN_CTRLMODE_FD |
+                       CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+       .clock = {
+               .freq = PCAN_UFD_CRYSTAL_HZ,
+       },
+       .bittiming_const = {
+               .name = "pcan_usb_fd",
+               .tseg1_min = 1,
+               .tseg1_max = 64,
+               .tseg2_min = 1,
+               .tseg2_max = 16,
+               .sjw_max = 16,
+               .brp_min = 1,
+               .brp_max = 1024,
+               .brp_inc = 1,
+       },
+       .data_bittiming_const = {
+               .name = "pcan_usb_fd",
+               .tseg1_min = 1,
+               .tseg1_max = 16,
+               .tseg2_min = 1,
+               .tseg2_max = 8,
+               .sjw_max = 4,
+               .brp_min = 1,
+               .brp_max = 1024,
+               .brp_inc = 1,
+       },
+
+       /* size of device private data */
+       .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+       /* timestamps usage */
+       .ts_used_bits = 32,
+       .ts_period = 1000000, /* calibration period in ts. */
+       .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+       .us_per_ts_shift = 0,
+
+       /* give here messages in/out endpoints */
+       .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+       .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0},
+
+       /* size of rx/tx usb buffers */
+       .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+       .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+       /* device callbacks */
+       .intf_probe = pcan_usb_pro_probe,       /* same as PCAN-USB Pro */
+       .dev_init = pcan_usb_fd_init,
+
+       .dev_exit = pcan_usb_fd_exit,
+       .dev_free = pcan_usb_fd_free,
+       .dev_set_bus = pcan_usb_fd_set_bus,
+       .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+       .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+       .dev_decode_buf = pcan_usb_fd_decode_buf,
+       .dev_start = pcan_usb_fd_start,
+       .dev_stop = pcan_usb_fd_stop,
+       .dev_restart_async = pcan_usb_fd_restart_async,
+       .dev_encode_msg = pcan_usb_fd_encode_msg,
+
+       .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
+
+/* describes the PCAN-USB Pro FD adapter */
+const struct peak_usb_adapter pcan_usb_pro_fd = {
+       .name = "PCAN-USB Pro FD",
+       .device_id = PCAN_USBPROFD_PRODUCT_ID,
+       .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
+       .ctrlmode_supported = CAN_CTRLMODE_FD |
+                       CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+       .clock = {
+               .freq = PCAN_UFD_CRYSTAL_HZ,
+       },
+       .bittiming_const = {
+               .name = "pcan_usb_pro_fd",
+               .tseg1_min = 1,
+               .tseg1_max = 64,
+               .tseg2_min = 1,
+               .tseg2_max = 16,
+               .sjw_max = 16,
+               .brp_min = 1,
+               .brp_max = 1024,
+               .brp_inc = 1,
+       },
+       .data_bittiming_const = {
+               .name = "pcan_usb_pro_fd",
+               .tseg1_min = 1,
+               .tseg1_max = 16,
+               .tseg2_min = 1,
+               .tseg2_max = 8,
+               .sjw_max = 4,
+               .brp_min = 1,
+               .brp_max = 1024,
+               .brp_inc = 1,
+       },
+
+       /* size of device private data */
+       .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+       /* timestamps usage */
+       .ts_used_bits = 32,
+       .ts_period = 1000000, /* calibration period in ts. */
+       .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+       .us_per_ts_shift = 0,
+
+       /* give here messages in/out endpoints */
+       .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+       .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1},
+
+       /* size of rx/tx usb buffers */
+       .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+       .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+       /* device callbacks */
+       .intf_probe = pcan_usb_pro_probe,       /* same as PCAN-USB Pro */
+       .dev_init = pcan_usb_fd_init,
+
+       .dev_exit = pcan_usb_fd_exit,
+       .dev_free = pcan_usb_fd_free,
+       .dev_set_bus = pcan_usb_fd_set_bus,
+       .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+       .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+       .dev_decode_buf = pcan_usb_fd_decode_buf,
+       .dev_start = pcan_usb_fd_start,
+       .dev_stop = pcan_usb_fd_stop,
+       .dev_restart_async = pcan_usb_fd_restart_async,
+       .dev_encode_msg = pcan_usb_fd_encode_msg,
+
+       .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
index 4cfa3b8605b19f08ecdeed869f2874e72957d5b7..dec51717635e900aafc91b771e5bfa0a2b594fae 100644 (file)
 
 MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter");
 
-/* PCAN-USB Pro Endpoints */
-#define PCAN_USBPRO_EP_CMDOUT          1
-#define PCAN_USBPRO_EP_CMDIN           (PCAN_USBPRO_EP_CMDOUT | USB_DIR_IN)
-#define PCAN_USBPRO_EP_MSGOUT_0                2
-#define PCAN_USBPRO_EP_MSGIN           (PCAN_USBPRO_EP_MSGOUT_0 | USB_DIR_IN)
-#define PCAN_USBPRO_EP_MSGOUT_1                3
-#define PCAN_USBPRO_EP_UNUSED          (PCAN_USBPRO_EP_MSGOUT_1 | USB_DIR_IN)
-
 #define PCAN_USBPRO_CHANNEL_COUNT      2
 
 /* PCAN-USB Pro adapter internal clock (MHz) */
@@ -322,8 +314,8 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
        return (i >= PCAN_USBPRO_RSP_SUBMIT_MAX) ? -ERANGE : err;
 }
 
-static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
-                                int req_value, void *req_addr, int req_size)
+int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
+                         int req_value, void *req_addr, int req_size)
 {
        int err;
        u8 req_type;
@@ -475,7 +467,7 @@ static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev,
        return pcan_usb_pro_set_bitrate(dev, ccbt);
 }
 
-static void pcan_usb_pro_restart_complete(struct urb *urb)
+void pcan_usb_pro_restart_complete(struct urb *urb)
 {
        /* can delete usb resources */
        peak_usb_async_complete(urb);
@@ -634,6 +626,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
        switch (new_state) {
        case CAN_STATE_BUS_OFF:
                can_frame->can_id |= CAN_ERR_BUSOFF;
+               dev->can.can_stats.bus_off++;
                can_bus_off(netdev);
                break;
 
@@ -977,7 +970,7 @@ static void pcan_usb_pro_free(struct peak_usb_device *dev)
 /*
  * probe function for new PCAN-USB Pro usb interface
  */
-static int pcan_usb_pro_probe(struct usb_interface *intf)
+int pcan_usb_pro_probe(struct usb_interface *intf)
 {
        struct usb_host_interface *if_desc;
        int i;
@@ -1011,10 +1004,11 @@ static int pcan_usb_pro_probe(struct usb_interface *intf)
 /*
  * describe the PCAN-USB Pro adapter
  */
-struct peak_usb_adapter pcan_usb_pro = {
+const struct peak_usb_adapter pcan_usb_pro = {
        .name = "PCAN-USB Pro",
        .device_id = PCAN_USBPRO_PRODUCT_ID,
        .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT,
+       .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
        .clock = {
                .freq = PCAN_USBPRO_CRYSTAL_HZ,
        },
index 837cee2671326f3d14b85525f14005222a783ff1..a62f7ab8980f77c23889c59c9f6c6229e3d268db 100644 (file)
 #define PCAN_USBPRO_INFO_BL            0
 #define PCAN_USBPRO_INFO_FW            1
 
+/* PCAN-USB Pro (FD) Endpoints */
+#define PCAN_USBPRO_EP_CMDOUT          1
+#define PCAN_USBPRO_EP_CMDIN           (PCAN_USBPRO_EP_CMDOUT | USB_DIR_IN)
+#define PCAN_USBPRO_EP_MSGOUT_0                2
+#define PCAN_USBPRO_EP_MSGIN           (PCAN_USBPRO_EP_MSGOUT_0 | USB_DIR_IN)
+#define PCAN_USBPRO_EP_MSGOUT_1                3
+#define PCAN_USBPRO_EP_UNUSED          (PCAN_USBPRO_EP_MSGOUT_1 | USB_DIR_IN)
+
 /* Vendor Request value for XXX_FCT */
 #define PCAN_USBPRO_FCT_DRVLD          5 /* tell device driver is loaded */
 #define PCAN_USBPRO_FCT_DRVLD_REQ_LEN  16
@@ -176,4 +184,9 @@ union pcan_usb_pro_rec {
        struct pcan_usb_pro_txmsg       tx_msg;
 };
 
+int pcan_usb_pro_probe(struct usb_interface *intf);
+int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
+                         int req_value, void *req_addr, int req_size);
+void pcan_usb_pro_restart_complete(struct urb *urb);
+
 #endif
index ef674ecb82f8e64ef3b57de77626ff31c67e1ba5..dd52c7a4c80d9f26faa74ece8d109ee21045ba47 100644 (file)
@@ -377,6 +377,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
        case USB_8DEV_STATUSMSG_BUSOFF:
                priv->can.state = CAN_STATE_BUS_OFF;
                cf->can_id |= CAN_ERR_BUSOFF;
+               priv->can.can_stats.bus_off++;
                can_bus_off(priv->netdev);
                break;
        case USB_8DEV_STATUSMSG_OVERRUN:
index feb29c4526f7eea698ef8568b784daafe467980d..4daffb2849319df132eccb5068a9164fe60b916d 100644 (file)
@@ -233,6 +233,35 @@ static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
        core_writel(priv, reg, CORE_EEE_EN_CTRL);
 }
 
+static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
+{
+       struct bcm_sf2_priv *priv = ds_to_priv(ds);
+       u32 reg;
+
+       reg = reg_readl(priv, REG_SPHY_CNTRL);
+       if (enable) {
+               reg |= PHY_RESET;
+               reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
+               reg_writel(priv, reg, REG_SPHY_CNTRL);
+               udelay(21);
+               reg = reg_readl(priv, REG_SPHY_CNTRL);
+               reg &= ~PHY_RESET;
+       } else {
+               reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
+               reg_writel(priv, reg, REG_SPHY_CNTRL);
+               mdelay(1);
+               reg |= CK25_DIS;
+       }
+       reg_writel(priv, reg, REG_SPHY_CNTRL);
+
+       /* Use PHY-driven LED signaling */
+       if (!enable) {
+               reg = reg_readl(priv, REG_LED_CNTRL(0));
+               reg |= SPDLNK_SRC_SEL;
+               reg_writel(priv, reg, REG_LED_CNTRL(0));
+       }
+}
+
 static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
                              struct phy_device *phy)
 {
@@ -248,6 +277,24 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
        /* Clear the Rx and Tx disable bits and set to no spanning tree */
        core_writel(priv, 0, CORE_G_PCTL_PORT(port));
 
+       /* Re-enable the GPHY and re-apply workarounds */
+       if (port == 0 && priv->hw_params.num_gphy == 1) {
+               bcm_sf2_gphy_enable_set(ds, true);
+               if (phy) {
+                       /* if phy_stop() has been called before, phy
+                        * will be in halted state, and phy_start()
+                        * will call resume.
+                        *
+                        * the resume path does not configure back
+                        * autoneg settings, and since we hard reset
+                        * the phy manually here, we need to reset the
+                        * state machine also.
+                        */
+                       phy->state = PHY_READY;
+                       phy_init_hw(phy);
+               }
+       }
+
        /* Enable port 7 interrupts to get notified */
        if (port == 7)
                intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
@@ -281,6 +328,9 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
                intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
        }
 
+       if (port == 0 && priv->hw_params.num_gphy == 1)
+               bcm_sf2_gphy_enable_set(ds, false);
+
        if (dsa_is_cpu_port(ds, port))
                off = CORE_IMP_CTL;
        else
@@ -400,6 +450,16 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
        return 0;
 }
 
+static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
+{
+       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+}
+
 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 {
        const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -440,12 +500,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
        }
 
        /* Disable all interrupts and request them */
-       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
-       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
-       intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
-       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
-       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
-       intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       bcm_sf2_intr_disable(priv);
 
        ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
                          "switch_0", priv);
@@ -747,12 +802,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        unsigned int port;
 
-       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
-       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
-       intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
-       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
-       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
-       intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       bcm_sf2_intr_disable(priv);
 
        /* Disable all ports physically present including the IMP
         * port, the other ones have already been disabled during
@@ -771,7 +821,6 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 {
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
        unsigned int port;
-       u32 reg;
        int ret;
 
        ret = bcm_sf2_sw_rst(priv);
@@ -780,17 +829,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
                return ret;
        }
 
-       /* Reinitialize the single GPHY */
-       if (priv->hw_params.num_gphy == 1) {
-               reg = reg_readl(priv, REG_SPHY_CNTRL);
-               reg |= PHY_RESET;
-               reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS);
-               reg_writel(priv, reg, REG_SPHY_CNTRL);
-               udelay(21);
-               reg = reg_readl(priv, REG_SPHY_CNTRL);
-               reg &= ~PHY_RESET;
-               reg_writel(priv, reg, REG_SPHY_CNTRL);
-       }
+       if (priv->hw_params.num_gphy == 1)
+               bcm_sf2_gphy_enable_set(ds, true);
 
        for (port = 0; port < DSA_MAX_PORTS; port++) {
                if ((1 << port) & ds->phys_port_mask)
index 1bb49cb699ab02a5af7ef1de66ab6bbed0aa47ae..cabdfa5e217af7fcb4d7dbfb37a141a987d6fd57 100644 (file)
 #define  LPI_COUNT_SHIFT               9
 #define  LPI_COUNT_MASK                        0x3F
 
+#define REG_LED_CNTRL_BASE             0x90
+#define REG_LED_CNTRL(x)               (REG_LED_CNTRL_BASE + (x) * 4)
+#define  SPDLNK_SRC_SEL                        (1 << 24)
+
 /* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
 #define INTRL2_CPU_STATUS              0x00
 #define INTRL2_CPU_SET                 0x04
index 258d9ef5ef254e49a0fe27586049bbc2a59ebc61..e13adc7b3ddaf4cc5742d99b68340f152ba50184 100644 (file)
 #include <net/dsa.h>
 #include "mv88e6xxx.h"
 
-static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask)
+static int mv88e6352_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
 {
        unsigned long timeout = jiffies + HZ / 10;
 
        while (time_before(jiffies, timeout)) {
                int ret;
 
-               ret = REG_READ(REG_GLOBAL2, reg);
-               if (ret < 0)
-                       return ret;
-
+               ret = REG_READ(reg, offset);
                if (!(ret & mask))
                        return 0;
 
@@ -43,17 +40,17 @@ static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask)
 
 static inline int mv88e6352_phy_wait(struct dsa_switch *ds)
 {
-       return mv88e6352_wait(ds, 0x18, 0x8000);
+       return mv88e6352_wait(ds, REG_GLOBAL2, 0x18, 0x8000);
 }
 
 static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds)
 {
-       return mv88e6352_wait(ds, 0x14, 0x0800);
+       return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x0800);
 }
 
 static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds)
 {
-       return mv88e6352_wait(ds, 0x14, 0x8000);
+       return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x8000);
 }
 
 static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum)
index cd6807c6b4eddaec7eb57ebc2107c0290ff1018f..3e7e31a6abb73f28ea44c9314c4f3628337edab7 100644 (file)
@@ -85,6 +85,12 @@ int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
        ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
        mutex_unlock(&ps->smi_mutex);
 
+       if (ret < 0)
+               return ret;
+
+       dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+               addr, reg, ret);
+
        return ret;
 }
 
@@ -128,6 +134,9 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
        if (bus == NULL)
                return -EINVAL;
 
+       dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+               addr, reg, val);
+
        mutex_lock(&ps->smi_mutex);
        ret = __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
        mutex_unlock(&ps->smi_mutex);
index dede43f4ce095bbea6a85923891acae0e704563d..8f8418d2ac4ad07016c37e6d5cb255e2b9709b85 100644 (file)
@@ -769,11 +769,11 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
                first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
        }
 
-       if(vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                first_txd->processFlags |=
                    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
                first_txd->processFlags |=
-                   cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
+                   cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
                                TYPHOON_TX_PF_VLAN_TAG_SHIFT);
        }
 
index 1fcd5568a3520981fd6ac03b57a3a599c2876a6c..f3470d96837a7fb0e59307b000855fe18a882af5 100644 (file)
@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev)
        }
 
        db->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(db->clk))
+       if (IS_ERR(db->clk)) {
+               ret = PTR_ERR(db->clk);
                goto out;
+       }
 
        clk_prepare_enable(db->clk);
 
index b68074803de32d54accdf919fe5cda7ca025ecb2..b90a26b13fdf22a61b08b311a4f53dbd77f81a6b 100644 (file)
@@ -2429,9 +2429,9 @@ restart:
                flagsize = (skb->len << 16) | (BD_FLG_END);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        flagsize |= BD_FLG_TCP_UDP_SUM;
-               if (vlan_tx_tag_present(skb)) {
+               if (skb_vlan_tag_present(skb)) {
                        flagsize |= BD_FLG_VLAN_TAG;
-                       vlan_tag = vlan_tx_tag_get(skb);
+                       vlan_tag = skb_vlan_tag_get(skb);
                }
                desc = ap->tx_ring + idx;
                idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2450,9 +2450,9 @@ restart:
                flagsize = (skb_headlen(skb) << 16);
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        flagsize |= BD_FLG_TCP_UDP_SUM;
-               if (vlan_tx_tag_present(skb)) {
+               if (skb_vlan_tag_present(skb)) {
                        flagsize |= BD_FLG_VLAN_TAG;
-                       vlan_tag = vlan_tx_tag_get(skb);
+                       vlan_tag = skb_vlan_tag_get(skb);
                }
 
                ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
index 3498760dc22a96c17419abbda9250cc959165a39..760c72c6e2acd50ba8472e4b4dd77170c2c381d6 100644 (file)
@@ -1170,10 +1170,6 @@ tx_request_irq_error:
 init_error:
        free_skbufs(dev);
 alloc_skbuf_error:
-       if (priv->phydev) {
-               phy_disconnect(priv->phydev);
-               priv->phydev = NULL;
-       }
 phy_error:
        return ret;
 }
@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev)
        int ret;
        unsigned long int flags;
 
-       /* Stop and disconnect the PHY */
-       if (priv->phydev) {
+       /* Stop the PHY */
+       if (priv->phydev)
                phy_stop(priv->phydev);
-               phy_disconnect(priv->phydev);
-               priv->phydev = NULL;
-       }
 
        netif_stop_queue(dev);
        napi_disable(&priv->napi);
@@ -1525,6 +1518,10 @@ err_free_netdev:
 static int altera_tse_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
+       struct altera_tse_private *priv = netdev_priv(ndev);
+
+       if (priv->phydev)
+               phy_disconnect(priv->phydev);
 
        platform_set_drvdata(pdev, NULL);
        altera_tse_mdio_destroy(ndev);
index 7a5e4aa5415e2652a13d9624a70003a5e2f6ad3d..c638c85f3954bc685db3ccd9ac338d1375b8de02 100644 (file)
@@ -45,7 +45,7 @@ config AMD8111_ETH
 
 config LANCE
        tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-       depends on ISA && ISA_DMA_API
+       depends on ISA && ISA_DMA_API && !ARM
        ---help---
          If you have a network (Ethernet) card of this type, say Y and read
          the Ethernet-HOWTO, available from
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN
 
 config NI65
        tristate "NI6510 support"
-       depends on ISA && ISA_DMA_API
+       depends on ISA && ISA_DMA_API && !ARM
        ---help---
          If you have a network (Ethernet) card of this type, say Y and read
          the Ethernet-HOWTO, available from
@@ -179,7 +179,7 @@ config SUNLANCE
 
 config AMD_XGBE
        tristate "AMD 10GbE Ethernet driver"
-       depends on OF_NET && HAS_IOMEM
+       depends on (OF_NET || ACPI) && HAS_IOMEM
        select PHYLIB
        select AMD_XGBE_PHY
        select BITREVERSE
index 841e6558db682757025e6284cd37d12c81e24039..4c2ae22217804fd8ce8e6cc8a27c6cf2855abba2 100644 (file)
@@ -1299,11 +1299,11 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
        lp->tx_ring[tx_index].tx_flags = 0;
 
 #if AMD8111E_VLAN_TAG_USED
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                lp->tx_ring[tx_index].tag_ctrl_cmd |=
                                cpu_to_le16(TCC_VLAN_INSERT);
                lp->tx_ring[tx_index].tag_ctrl_info =
-                               cpu_to_le16(vlan_tx_tag_get(skb));
+                               cpu_to_le16(skb_vlan_tag_get(skb));
 
        }
 #endif
index 5b22764ba88d2304c3502cd987420e7f955fd793..27245efe9f50098eee594cb844e5e1647978bf3d 100644 (file)
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
   do {
     /* WARNING: MACE_IR is a READ/CLEAR port! */
     status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+    if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
+      return IRQ_NONE;
 
     pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
 
index e2e3aaf501a20f7b9e8a7838b9b410ec89017c4d..11d6e6561df159c3dc9dff28fc504e945a77f47b 100644 (file)
@@ -2806,7 +2806,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
 
 /*
  * Check for loss of link and link establishment.
- * Can not use mii_check_media because it does nothing if mode is forced.
+ * Could possibly be changed to use mii_check_media instead.
  */
 
 static void pcnet32_watchdog(struct net_device *dev)
index 75b08c63d39f4a469a389c0327e1f3071932596f..29a09271b64a39b71a46ac1d5beb5a6472160509 100644 (file)
 #define MTL_Q_RQOMR                    0x40
 #define MTL_Q_RQMPOCR                  0x44
 #define MTL_Q_RQDR                     0x4c
+#define MTL_Q_RQFCR                    0x50
 #define MTL_Q_IER                      0x70
 #define MTL_Q_ISR                      0x74
 
 /* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX          1
+#define MTL_Q_RQFCR_RFA_WIDTH          6
+#define MTL_Q_RQFCR_RFD_INDEX          17
+#define MTL_Q_RQFCR_RFD_WIDTH          6
 #define MTL_Q_RQOMR_EHFC_INDEX         7
 #define MTL_Q_RQOMR_EHFC_WIDTH         1
-#define MTL_Q_RQOMR_RFA_INDEX          8
-#define MTL_Q_RQOMR_RFA_WIDTH          3
-#define MTL_Q_RQOMR_RFD_INDEX          13
-#define MTL_Q_RQOMR_RFD_WIDTH          3
 #define MTL_Q_RQOMR_RQS_INDEX          16
 #define MTL_Q_RQOMR_RQS_WIDTH          9
 #define MTL_Q_RQOMR_RSF_INDEX          5
index 76479d04b9037370ebb91cd161574c793eaf881b..2c063b60db4b02bc48246887fb5d98f6b3de0394 100644 (file)
@@ -328,7 +328,7 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
 
        buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
        pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
-       if (pdata->xgbe_debugfs == NULL) {
+       if (!pdata->xgbe_debugfs) {
                netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
                return;
        }
index a50891f521978ff67d55959fbfab1b63da762f4c..d81fc6bd4759064f72d716e3cd81f0045f8d1bc9 100644 (file)
@@ -422,7 +422,6 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 
                ring->cur = 0;
                ring->dirty = 0;
-               memset(&ring->rx, 0, sizeof(ring->rx));
 
                hw_if->rx_desc_init(channel);
        }
@@ -621,35 +620,6 @@ err_out:
        return 0;
 }
 
-static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
-{
-       struct xgbe_prv_data *pdata = channel->pdata;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct xgbe_ring *ring = channel->rx_ring;
-       struct xgbe_ring_data *rdata;
-       int i;
-
-       DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
-             ring->rx.realloc_index);
-
-       for (i = 0; i < ring->dirty; i++) {
-               rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
-
-               /* Reset rdata values */
-               xgbe_unmap_rdata(pdata, rdata);
-
-               if (xgbe_map_rx_buffer(pdata, ring, rdata))
-                       break;
-
-               hw_if->rx_desc_reset(rdata);
-
-               ring->rx.realloc_index++;
-       }
-       ring->dirty = 0;
-
-       DBGPR("<--xgbe_realloc_rx_buffer\n");
-}
-
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
 {
        DBGPR("-->xgbe_init_function_ptrs_desc\n");
@@ -657,7 +627,7 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
        desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
        desc_if->free_ring_resources = xgbe_free_ring_resources;
        desc_if->map_tx_skb = xgbe_map_tx_skb;
-       desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+       desc_if->map_rx_buffer = xgbe_map_rx_buffer;
        desc_if->unmap_rdata = xgbe_unmap_rdata;
        desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
        desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
index 53f5f66ec2ee43fe5533697860f70d49a458b919..400757b49872704f6b236579efaf048ad461c2e9 100644 (file)
  */
 
 #include <linux/phy.h>
+#include <linux/mdio.h>
 #include <linux/clk.h>
 #include <linux/bitrev.h>
 #include <linux/crc32.h>
@@ -130,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
 
        DBGPR("-->xgbe_usec_to_riwt\n");
 
-       rate = clk_get_rate(pdata->sysclk);
+       rate = pdata->sysclk_rate;
 
        /*
         * Convert the input usec value to the watchdog timer value. Each
@@ -153,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
 
        DBGPR("-->xgbe_riwt_to_usec\n");
 
-       rate = clk_get_rate(pdata->sysclk);
+       rate = pdata->sysclk_rate;
 
        /*
         * Convert the input watchdog timer value to the usec value. Each
@@ -673,6 +674,9 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
 
 static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
 {
+       if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
+               return 0;
+
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
 
        return 0;
@@ -680,6 +684,9 @@ static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
 
 static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
 {
+       if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
+               return 0;
+
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
 
        return 0;
@@ -687,6 +694,9 @@ static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
 
 static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
 {
+       if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
+               return 0;
+
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
 
        return 0;
@@ -881,6 +891,23 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
 
+       /* If the PCS is changing modes, match the MAC speed to it */
+       if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
+           ((mmd_address & 0xffff) == MDIO_CTRL2)) {
+               struct phy_device *phydev = pdata->phydev;
+
+               if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
+                       /* KX mode */
+                       if (phydev->supported & SUPPORTED_1000baseKX_Full)
+                               xgbe_set_gmii_speed(pdata);
+                       else
+                               xgbe_set_gmii_2500_speed(pdata);
+               } else {
+                       /* KR mode */
+                       xgbe_set_xgmii_speed(pdata);
+               }
+       }
+
        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
@@ -1359,6 +1386,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
        unsigned int tso_context, vlan_context;
        unsigned int tx_set_ic;
        int start_index = ring->cur;
+       int cur_index = ring->cur;
        int i;
 
        DBGPR("-->xgbe_dev_xmit\n");
@@ -1401,7 +1429,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
        else
                tx_set_ic = 0;
 
-       rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+       rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        rdesc = rdata->rdesc;
 
        /* Create a context descriptor if this is a TSO packet */
@@ -1444,8 +1472,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
                        ring->tx.cur_vlan_ctag = packet->vlan_ctag;
                }
 
-               ring->cur++;
-               rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+               cur_index++;
+               rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                rdesc = rdata->rdesc;
        }
 
@@ -1473,7 +1501,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
 
        /* Set OWN bit if not the first descriptor */
-       if (ring->cur != start_index)
+       if (cur_index != start_index)
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
        if (tso) {
@@ -1497,9 +1525,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
                                  packet->length);
        }
 
-       for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
-               ring->cur++;
-               rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+       for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
+               cur_index++;
+               rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                rdesc = rdata->rdesc;
 
                /* Update buffer address */
@@ -1551,7 +1579,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
        /* Make sure ownership is written to the descriptor */
        wmb();
 
-       ring->cur++;
+       ring->cur = cur_index + 1;
        if (!packet->skb->xmit_more ||
            netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
                                                   channel->queue_index)))
@@ -2079,10 +2107,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 
        for (i = 0; i < pdata->rx_q_count; i++) {
                /* Activate flow control when less than 4k left in fifo */
-               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
 
                /* De-activate flow control when more than 6k left in fifo */
-               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
        }
 }
 
@@ -2107,6 +2135,23 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
 }
 
+static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
+{
+       switch (pdata->phy_speed) {
+       case SPEED_10000:
+               xgbe_set_xgmii_speed(pdata);
+               break;
+
+       case SPEED_2500:
+               xgbe_set_gmii_2500_speed(pdata);
+               break;
+
+       case SPEED_1000:
+               xgbe_set_gmii_speed(pdata);
+               break;
+       }
+}
+
 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
 {
        if (pdata->netdev->features & NETIF_F_RXCSUM)
@@ -2757,6 +2802,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
        xgbe_config_mac_address(pdata);
        xgbe_config_jumbo_enable(pdata);
        xgbe_config_flow_control(pdata);
+       xgbe_config_mac_speed(pdata);
        xgbe_config_checksum_offload(pdata);
        xgbe_config_vlan_support(pdata);
        xgbe_config_mmc(pdata);
index 7bb5f07dbeef3e806174047cc883e3fb6ca149c3..d41f9f4686882722e71452fbc5725b3d9d8e7aca 100644 (file)
@@ -225,6 +225,11 @@ static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
        return (ring->rdesc_count - (ring->cur - ring->dirty));
 }
 
+static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
+{
+       return (ring->cur - ring->dirty);
+}
+
 static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
                                    struct xgbe_ring *ring, unsigned int count)
 {
@@ -410,17 +415,13 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
        struct xgbe_channel *channel = container_of(timer,
                                                    struct xgbe_channel,
                                                    tx_timer);
-       struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_prv_data *pdata = channel->pdata;
        struct napi_struct *napi;
-       unsigned long flags;
 
        DBGPR("-->xgbe_tx_timer\n");
 
        napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
 
-       spin_lock_irqsave(&ring->lock, flags);
-
        if (napi_schedule_prep(napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->per_channel_irq)
@@ -434,8 +435,6 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
 
        channel->tx_timer_active = 0;
 
-       spin_unlock_irqrestore(&ring->lock, flags);
-
        DBGPR("<--xgbe_tx_timer\n");
 
        return HRTIMER_NORESTART;
@@ -523,6 +522,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+       hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
        hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
@@ -552,13 +552,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
                break;
        }
 
-       /* The Queue and Channel counts are zero based so increment them
+       /* The Queue, Channel and TC counts are zero based so increment them
         * to get the actual number
         */
        hw_feat->rx_q_cnt++;
        hw_feat->tx_q_cnt++;
        hw_feat->rx_ch_cnt++;
        hw_feat->tx_ch_cnt++;
+       hw_feat->tc_cnt++;
 
        DBGPR("<--xgbe_get_all_hw_features\n");
 }
@@ -692,7 +693,7 @@ static void xgbe_adjust_link(struct net_device *netdev)
        struct phy_device *phydev = pdata->phydev;
        int new_state = 0;
 
-       if (phydev == NULL)
+       if (!phydev)
                return;
 
        if (phydev->link) {
@@ -927,7 +928,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
        DBGPR("<--xgbe_stop\n");
 }
 
-static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
+static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
        struct xgbe_channel *channel;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -950,9 +951,8 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);
 
-       /* Issue software reset to device if requested */
-       if (reset)
-               hw_if->exit(pdata);
+       /* Issue software reset to device */
+       hw_if->exit(pdata);
 
        xgbe_start(pdata);
 
@@ -967,7 +967,7 @@ static void xgbe_restart(struct work_struct *work)
 
        rtnl_lock();
 
-       xgbe_restart_dev(pdata, 1);
+       xgbe_restart_dev(pdata);
 
        rtnl_unlock();
 }
@@ -1165,8 +1165,8 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
 
 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
 {
-       if (vlan_tx_tag_present(skb))
-               packet->vlan_ctag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb))
+               packet->vlan_ctag = skb_vlan_tag_get(skb);
 }
 
 static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
@@ -1247,9 +1247,9 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               CSUM_ENABLE, 1);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                /* VLAN requires an extra descriptor if tag is different */
-               if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
+               if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
                        /* We can share with the TSO context descriptor */
                        if (!context_desc) {
                                context_desc = 1;
@@ -1446,7 +1446,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
        struct xgbe_ring *ring;
        struct xgbe_packet_data *packet;
        struct netdev_queue *txq;
-       unsigned long flags;
        int ret;
 
        DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
@@ -1458,8 +1457,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        ret = NETDEV_TX_OK;
 
-       spin_lock_irqsave(&ring->lock, flags);
-
        if (skb->len == 0) {
                netdev_err(netdev, "empty skb received from stack\n");
                dev_kfree_skb_any(skb);
@@ -1506,10 +1503,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
        ret = NETDEV_TX_OK;
 
 tx_netdev_return:
-       spin_unlock_irqrestore(&ring->lock, flags);
-
-       DBGPR("<--xgbe_xmit\n");
-
        return ret;
 }
 
@@ -1587,7 +1580,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
        pdata->rx_buf_size = ret;
        netdev->mtu = mtu;
 
-       xgbe_restart_dev(pdata, 0);
+       xgbe_restart_dev(pdata);
 
        DBGPR("<--xgbe_change_mtu\n");
 
@@ -1776,15 +1769,28 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
 static void xgbe_rx_refresh(struct xgbe_channel *channel)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
+       struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
 
-       desc_if->realloc_rx_buffer(channel);
+       while (ring->dirty != ring->cur) {
+               rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+
+               /* Reset rdata values */
+               desc_if->unmap_rdata(pdata, rdata);
+
+               if (desc_if->map_rx_buffer(pdata, ring, rdata))
+                       break;
+
+               hw_if->rx_desc_reset(rdata);
+
+               ring->dirty++;
+       }
 
        /* Update the Rx Tail Pointer Register with address of
         * the last cleaned entry */
-       rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+       rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));
 }
@@ -1824,7 +1830,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        struct xgbe_ring_desc *rdesc;
        struct net_device *netdev = pdata->netdev;
        struct netdev_queue *txq;
-       unsigned long flags;
        int processed = 0;
        unsigned int tx_packets = 0, tx_bytes = 0;
 
@@ -1836,8 +1841,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 
        txq = netdev_get_tx_queue(netdev, channel->queue_index);
 
-       spin_lock_irqsave(&ring->lock, flags);
-
        while ((processed < XGBE_TX_DESC_MAX_PROC) &&
               (ring->dirty != ring->cur)) {
                rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
@@ -1868,7 +1871,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        }
 
        if (!processed)
-               goto unlock;
+               return 0;
 
        netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
 
@@ -1880,9 +1883,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 
        DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
 
-unlock:
-       spin_unlock_irqrestore(&ring->lock, flags);
-
        return processed;
 }
 
@@ -1934,7 +1934,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 read_again:
                rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 
-               if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
+               if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
                        xgbe_rx_refresh(channel);
 
                if (hw_if->dev_read(channel))
@@ -1942,7 +1942,6 @@ read_again:
 
                received++;
                ring->cur++;
-               ring->dirty++;
 
                incomplete = XGMAC_GET_BITS(packet->attributes,
                                            RX_PACKET_ATTRIBUTES,
index dbd3850b8b0a8f61053026a887e5a90f7a3aef53..32dd6513705117cdc434800fc68b6c52e750ed78 100644 (file)
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/clk.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -148,6 +151,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
        pdata->pause_autoneg = 1;
        pdata->tx_pause = 1;
        pdata->rx_pause = 1;
+       pdata->phy_speed = SPEED_UNKNOWN;
        pdata->power_down = 0;
        pdata->default_autoneg = AUTONEG_ENABLE;
        pdata->default_speed = SPEED_10000;
@@ -161,6 +165,96 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
        xgbe_init_function_ptrs_desc(&pdata->desc_if);
 }
 
+#ifdef CONFIG_ACPI
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+       struct acpi_device *adev = pdata->adev;
+       struct device *dev = pdata->dev;
+       u32 property;
+       acpi_handle handle;
+       acpi_status status;
+       unsigned long long data;
+       int cca;
+       int ret;
+
+       /* Obtain the system clock setting */
+       ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
+       if (ret) {
+               dev_err(dev, "unable to obtain %s property\n",
+                       XGBE_ACPI_DMA_FREQ);
+               return ret;
+       }
+       pdata->sysclk_rate = property;
+
+       /* Obtain the PTP clock setting */
+       ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
+       if (ret) {
+               dev_err(dev, "unable to obtain %s property\n",
+                       XGBE_ACPI_PTP_FREQ);
+               return ret;
+       }
+       pdata->ptpclk_rate = property;
+
+       /* Retrieve the device cache coherency value */
+       handle = adev->handle;
+       do {
+               status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+               if (!ACPI_FAILURE(status)) {
+                       cca = data;
+                       break;
+               }
+
+               status = acpi_get_parent(handle, &handle);
+       } while (!ACPI_FAILURE(status));
+
+       if (ACPI_FAILURE(status)) {
+               dev_err(dev, "error obtaining acpi coherency value\n");
+               return -EINVAL;
+       }
+       pdata->coherent = !!cca;
+
+       return 0;
+}
+#else   /* CONFIG_ACPI */
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+       return -EINVAL;
+}
+#endif  /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+       struct device *dev = pdata->dev;
+
+       /* Obtain the system clock setting */
+       pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+       if (IS_ERR(pdata->sysclk)) {
+               dev_err(dev, "dma devm_clk_get failed\n");
+               return PTR_ERR(pdata->sysclk);
+       }
+       pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+       /* Obtain the PTP clock setting */
+       pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+       if (IS_ERR(pdata->ptpclk)) {
+               dev_err(dev, "ptp devm_clk_get failed\n");
+               return PTR_ERR(pdata->ptpclk);
+       }
+       pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+       /* Retrieve the device cache coherency value */
+       pdata->coherent = of_dma_is_coherent(dev->of_node);
+
+       return 0;
+}
+#else   /* CONFIG_OF */
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+       return -EINVAL;
+}
+#endif  /*CONFIG_OF */
+
 static int xgbe_probe(struct platform_device *pdev)
 {
        struct xgbe_prv_data *pdata;
@@ -169,7 +263,7 @@ static int xgbe_probe(struct platform_device *pdev)
        struct net_device *netdev;
        struct device *dev = &pdev->dev;
        struct resource *res;
-       const u8 *mac_addr;
+       const char *phy_mode;
        unsigned int i;
        int ret;
 
@@ -186,6 +280,7 @@ static int xgbe_probe(struct platform_device *pdev)
        pdata = netdev_priv(netdev);
        pdata->netdev = netdev;
        pdata->pdev = pdev;
+       pdata->adev = ACPI_COMPANION(dev);
        pdata->dev = dev;
        platform_set_drvdata(pdev, netdev);
 
@@ -194,6 +289,9 @@ static int xgbe_probe(struct platform_device *pdev)
        mutex_init(&pdata->rss_mutex);
        spin_lock_init(&pdata->tstamp_lock);
 
+       /* Check if we should use ACPI or DT */
+       pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+
        /* Set and validate the number of descriptors for a ring */
        BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
        pdata->tx_desc_count = XGBE_TX_DESC_CNT;
@@ -212,22 +310,6 @@ static int xgbe_probe(struct platform_device *pdev)
                goto err_io;
        }
 
-       /* Obtain the system clock setting */
-       pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
-       if (IS_ERR(pdata->sysclk)) {
-               dev_err(dev, "dma devm_clk_get failed\n");
-               ret = PTR_ERR(pdata->sysclk);
-               goto err_io;
-       }
-
-       /* Obtain the PTP clock setting */
-       pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
-       if (IS_ERR(pdata->ptpclk)) {
-               dev_err(dev, "ptp devm_clk_get failed\n");
-               ret = PTR_ERR(pdata->ptpclk);
-               goto err_io;
-       }
-
        /* Obtain the mmio areas for the device */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pdata->xgmac_regs = devm_ioremap_resource(dev, res);
@@ -247,16 +329,42 @@ static int xgbe_probe(struct platform_device *pdev)
        }
        DBGPR("  xpcs_regs  = %p\n", pdata->xpcs_regs);
 
-       /* Set the DMA mask */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
-       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
-       if (ret) {
-               dev_err(dev, "dma_set_mask_and_coherent failed\n");
+       /* Retrieve the MAC address */
+       ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
+                                           pdata->mac_addr,
+                                           sizeof(pdata->mac_addr));
+       if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
+               dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
+               if (!ret)
+                       ret = -EINVAL;
                goto err_io;
        }
 
-       if (of_property_read_bool(dev->of_node, "dma-coherent")) {
+       /* Retrieve the PHY mode - it must be "xgmii" */
+       ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
+                                         &phy_mode);
+       if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+               dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
+               if (!ret)
+                       ret = -EINVAL;
+               goto err_io;
+       }
+       pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+
+       /* Check for per channel interrupt support */
+       if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
+               pdata->per_channel_irq = 1;
+
+       /* Obtain device settings unique to ACPI/OF */
+       if (pdata->use_acpi)
+               ret = xgbe_acpi_support(pdata);
+       else
+               ret = xgbe_of_support(pdata);
+       if (ret)
+               goto err_io;
+
+       /* Set the DMA coherency values */
+       if (pdata->coherent) {
                pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
                pdata->arcache = XGBE_DMA_OS_ARCACHE;
                pdata->awcache = XGBE_DMA_OS_AWCACHE;
@@ -266,10 +374,16 @@ static int xgbe_probe(struct platform_device *pdev)
                pdata->awcache = XGBE_DMA_SYS_AWCACHE;
        }
 
-       /* Check for per channel interrupt support */
-       if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
-               pdata->per_channel_irq = 1;
+       /* Set the DMA mask */
+       if (!dev->dma_mask)
+               dev->dma_mask = &dev->coherent_dma_mask;
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+       if (ret) {
+               dev_err(dev, "dma_set_mask_and_coherent failed\n");
+               goto err_io;
+       }
 
+       /* Get the device interrupt */
        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                dev_err(dev, "platform_get_irq 0 failed\n");
@@ -279,6 +393,7 @@ static int xgbe_probe(struct platform_device *pdev)
 
        netdev->irq = pdata->dev_irq;
        netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+       memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
 
        /* Set all the function pointers */
        xgbe_init_all_fptrs(pdata);
@@ -291,23 +406,6 @@ static int xgbe_probe(struct platform_device *pdev)
        /* Populate the hardware features */
        xgbe_get_all_hw_features(pdata);
 
-       /* Retrieve the MAC address */
-       mac_addr = of_get_mac_address(dev->of_node);
-       if (!mac_addr) {
-               dev_err(dev, "invalid mac address for this device\n");
-               ret = -EINVAL;
-               goto err_io;
-       }
-       memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
-
-       /* Retrieve the PHY mode - it must be "xgmii" */
-       pdata->phy_mode = of_get_phy_mode(dev->of_node);
-       if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
-               dev_err(dev, "invalid phy-mode specified for this device\n");
-               ret = -EINVAL;
-               goto err_io;
-       }
-
        /* Set default configuration data */
        xgbe_default_config(pdata);
 
@@ -491,18 +589,35 @@ static int xgbe_resume(struct device *dev)
 }
 #endif /* CONFIG_PM */
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_acpi_match[] = {
+       { "AMDI8001", 0 },
+       {},
+};
+
+MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
 static const struct of_device_id xgbe_of_match[] = {
        { .compatible = "amd,xgbe-seattle-v1a", },
        {},
 };
 
 MODULE_DEVICE_TABLE(of, xgbe_of_match);
+#endif
+
 static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
 
 static struct platform_driver xgbe_driver = {
        .driver = {
                .name = "amd-xgbe",
+#ifdef CONFIG_ACPI
+               .acpi_match_table = xgbe_acpi_match,
+#endif
+#ifdef CONFIG_OF
                .of_match_table = xgbe_of_match,
+#endif
                .pm = &xgbe_pm_ops,
        },
        .probe = xgbe_probe,
index 363b210560f332e08837e9bfb9426dcea62707a4..59e267f3f1b77e1ca34cbd0eaca7c760653450b4 100644 (file)
@@ -205,25 +205,16 @@ void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
 
 int xgbe_mdio_register(struct xgbe_prv_data *pdata)
 {
-       struct device_node *phy_node;
        struct mii_bus *mii;
        struct phy_device *phydev;
        int ret = 0;
 
        DBGPR("-->xgbe_mdio_register\n");
 
-       /* Retrieve the phy-handle */
-       phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
-       if (!phy_node) {
-               dev_err(pdata->dev, "unable to parse phy-handle\n");
-               return -EINVAL;
-       }
-
        mii = mdiobus_alloc();
-       if (mii == NULL) {
+       if (!mii) {
                dev_err(pdata->dev, "mdiobus_alloc failed\n");
-               ret = -ENOMEM;
-               goto err_node_get;
+               return -ENOMEM;
        }
 
        /* Register on the MDIO bus (don't probe any PHYs) */
@@ -252,18 +243,19 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
        request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
                       MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
 
-       of_node_get(phy_node);
-       phydev->dev.of_node = phy_node;
        ret = phy_device_register(phydev);
        if (ret) {
                dev_err(pdata->dev, "phy_device_register failed\n");
-               of_node_put(phy_node);
+               goto err_phy_device;
+       }
+       if (!phydev->dev.driver) {
+               dev_err(pdata->dev, "phy driver probe failed\n");
+               ret = -EIO;
                goto err_phy_device;
        }
 
        /* Add a reference to the PHY driver so it can't be unloaded */
-       pdata->phy_module = phydev->dev.driver ?
-                           phydev->dev.driver->owner : NULL;
+       pdata->phy_module = phydev->dev.driver->owner;
        if (!try_module_get(pdata->phy_module)) {
                dev_err(pdata->dev, "try_module_get failed\n");
                ret = -EIO;
@@ -283,8 +275,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
 
        pdata->phydev = phydev;
 
-       of_node_put(phy_node);
-
        DBGPHY_REGS(pdata);
 
        DBGPR("<--xgbe_mdio_register\n");
@@ -300,9 +290,6 @@ err_mdiobus_register:
 err_mdiobus_alloc:
        mdiobus_free(mii);
 
-err_node_get:
-       of_node_put(phy_node);
-
        return ret;
 }
 
index f5acf4cc69bdabbda7b2a489b50ef0b740fc0bf1..f326178ef3760122f034a27d5c5afeda4eb9aa70 100644 (file)
@@ -233,7 +233,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
        snprintf(info->name, sizeof(info->name), "%s",
                 netdev_name(pdata->netdev));
        info->owner = THIS_MODULE;
-       info->max_adj = clk_get_rate(pdata->ptpclk);
+       info->max_adj = pdata->ptpclk_rate;
        info->adjfreq = xgbe_adjfreq;
        info->adjtime = xgbe_adjtime;
        info->gettime = xgbe_gettime;
@@ -254,7 +254,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
         */
        dividend = 50000000;
        dividend <<= 32;
-       pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk));
+       pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
 
        /* Setup the timecounter */
        cc->read = xgbe_cc_read;
index 2af6affc35a7c7ac322a62fb692d6ec4e49edd03..13e8f95c077c3c57089e7b7f33c1577efc7143cf 100644 (file)
 #define XGBE_PHY_NAME          "amd_xgbe_phy"
 #define XGBE_PRTAD             0
 
+/* Common property names */
+#define XGBE_MAC_ADDR_PROPERTY "mac-address"
+#define XGBE_PHY_MODE_PROPERTY "phy-mode"
+#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+
 /* Device-tree clock names */
 #define XGBE_DMA_CLOCK         "dma_clk"
 #define XGBE_PTP_CLOCK         "ptp_clk"
-#define XGBE_DMA_IRQS          "amd,per-channel-interrupt"
+
+/* ACPI property names */
+#define XGBE_ACPI_DMA_FREQ     "amd,dma-freq"
+#define XGBE_ACPI_PTP_FREQ     "amd,ptp-freq"
 
 /* Timestamp support - values based on 50MHz PTP clock
  *   50MHz => 20 nsec
@@ -361,8 +369,7 @@ struct xgbe_ring {
         *  cur   - Tx: index of descriptor to be used for current transfer
         *          Rx: index of descriptor to check for packet availability
         *  dirty - Tx: index of descriptor to check for transfer complete
-        *          Rx: count of descriptors in which a packet has been received
-        *              (used with skb_realloc_index to refresh the ring)
+        *          Rx: index of descriptor to check for buffer reallocation
         */
        unsigned int cur;
        unsigned int dirty;
@@ -377,11 +384,6 @@ struct xgbe_ring {
                        unsigned short cur_mss;
                        unsigned short cur_vlan_ctag;
                } tx;
-
-               struct {
-                       unsigned int realloc_index;
-                       unsigned int realloc_threshold;
-               } rx;
        };
 } ____cacheline_aligned;
 
@@ -596,7 +598,8 @@ struct xgbe_desc_if {
        int (*alloc_ring_resources)(struct xgbe_prv_data *);
        void (*free_ring_resources)(struct xgbe_prv_data *);
        int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
-       void (*realloc_rx_buffer)(struct xgbe_channel *);
+       int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
+                            struct xgbe_ring_data *);
        void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
        void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
        void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
@@ -650,8 +653,12 @@ struct xgbe_hw_features {
 struct xgbe_prv_data {
        struct net_device *netdev;
        struct platform_device *pdev;
+       struct acpi_device *adev;
        struct device *dev;
 
+       /* ACPI or DT flag */
+       unsigned int use_acpi;
+
        /* XGMAC/XPCS related mmio registers */
        void __iomem *xgmac_regs;       /* XGMAC CSRs */
        void __iomem *xpcs_regs;        /* XPCS MMD registers */
@@ -672,6 +679,7 @@ struct xgbe_prv_data {
        struct xgbe_desc_if desc_if;
 
        /* AXI DMA settings */
+       unsigned int coherent;
        unsigned int axdomain;
        unsigned int arcache;
        unsigned int awcache;
@@ -739,6 +747,7 @@ struct xgbe_prv_data {
        unsigned int phy_rx_pause;
 
        /* Netdev related settings */
+       unsigned char mac_addr[ETH_ALEN];
        netdev_features_t netdev_features;
        struct napi_struct napi;
        struct xgbe_mmc_stats mmc_stats;
@@ -748,7 +757,9 @@ struct xgbe_prv_data {
 
        /* Device clocks */
        struct clk *sysclk;
+       unsigned long sysclk_rate;
        struct clk *ptpclk;
+       unsigned long ptpclk_rate;
 
        /* Timestamp support */
        spinlock_t tstamp_lock;
index 7ba83ffb08ac73b6437f1fd4a87c3a560b9d4f84..869d97fcf7810ff9abb7a2cc6c7ade6e49ceb2df 100644 (file)
@@ -593,10 +593,12 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
        if (!xgene_ring_mgr_init(pdata))
                return -ENODEV;
 
-       clk_prepare_enable(pdata->clk);
-       clk_disable_unprepare(pdata->clk);
-       clk_prepare_enable(pdata->clk);
-       xgene_enet_ecc_init(pdata);
+       if (!efi_enabled(EFI_BOOT)) {
+               clk_prepare_enable(pdata->clk);
+               clk_disable_unprepare(pdata->clk);
+               clk_prepare_enable(pdata->clk);
+               xgene_enet_ecc_init(pdata);
+       }
        xgene_enet_config_ring_if_assoc(pdata);
 
        /* Enable auto-incr for scanning */
@@ -663,15 +665,20 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
        struct phy_device *phy_dev;
        struct device *dev = &pdata->pdev->dev;
 
-       phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
-       if (!phy_np) {
-               netdev_dbg(ndev, "No phy-handle found\n");
-               return -ENODEV;
+       if (dev->of_node) {
+               phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+               if (!phy_np) {
+                       netdev_dbg(ndev, "No phy-handle found in DT\n");
+                       return -ENODEV;
+               }
+               pdata->phy_dev = of_phy_find_device(phy_np);
        }
 
-       phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
-                                0, pdata->phy_mode);
-       if (!phy_dev) {
+       phy_dev = pdata->phy_dev;
+
+       if (!phy_dev ||
+           phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+                              pdata->phy_mode)) {
                netdev_err(ndev, "Could not connect to PHY\n");
                return  -ENODEV;
        }
@@ -681,32 +688,71 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
                              ~SUPPORTED_100baseT_Half &
                              ~SUPPORTED_1000baseT_Half;
        phy_dev->advertising = phy_dev->supported;
-       pdata->phy_dev = phy_dev;
 
        return 0;
 }
 
-int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
+                                 struct mii_bus *mdio)
 {
-       struct net_device *ndev = pdata->ndev;
        struct device *dev = &pdata->pdev->dev;
+       struct net_device *ndev = pdata->ndev;
+       struct phy_device *phy;
        struct device_node *child_np;
        struct device_node *mdio_np = NULL;
-       struct mii_bus *mdio_bus;
        int ret;
+       u32 phy_id;
+
+       if (dev->of_node) {
+               for_each_child_of_node(dev->of_node, child_np) {
+                       if (of_device_is_compatible(child_np,
+                                                   "apm,xgene-mdio")) {
+                               mdio_np = child_np;
+                               break;
+                       }
+               }
 
-       for_each_child_of_node(dev->of_node, child_np) {
-               if (of_device_is_compatible(child_np, "apm,xgene-mdio")) {
-                       mdio_np = child_np;
-                       break;
+               if (!mdio_np) {
+                       netdev_dbg(ndev, "No mdio node in the dts\n");
+                       return -ENXIO;
                }
-       }
 
-       if (!mdio_np) {
-               netdev_dbg(ndev, "No mdio node in the dts\n");
-               return -ENXIO;
+               return of_mdiobus_register(mdio, mdio_np);
        }
 
+       /* Mask out all PHYs from auto probing. */
+       mdio->phy_mask = ~0;
+
+       /* Register the MDIO bus */
+       ret = mdiobus_register(mdio);
+       if (ret)
+               return ret;
+
+       ret = device_property_read_u32(dev, "phy-channel", &phy_id);
+       if (ret)
+               ret = device_property_read_u32(dev, "phy-addr", &phy_id);
+       if (ret)
+               return -EINVAL;
+
+       phy = get_phy_device(mdio, phy_id, true);
+       if (!phy || IS_ERR(phy))
+               return -EIO;
+
+       ret = phy_device_register(phy);
+       if (ret)
+               phy_device_free(phy);
+       else
+               pdata->phy_dev = phy;
+
+       return ret;
+}
+
+int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+{
+       struct net_device *ndev = pdata->ndev;
+       struct mii_bus *mdio_bus;
+       int ret;
+
        mdio_bus = mdiobus_alloc();
        if (!mdio_bus)
                return -ENOMEM;
@@ -720,7 +766,7 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
        mdio_bus->priv = pdata;
        mdio_bus->parent = &ndev->dev;
 
-       ret = of_mdiobus_register(mdio_bus, mdio_np);
+       ret = xgene_mdiobus_register(pdata, mdio_bus);
        if (ret) {
                netdev_err(ndev, "Failed to register MDIO bus\n");
                mdiobus_free(mdio_bus);
index 83a50280bb7098149624f4bfe341822845e2148f..44b15373d6b3e628a0198f888c163a5cbc1b9544 100644 (file)
 #include "xgene_enet_sgmac.h"
 #include "xgene_enet_xgmac.h"
 
+#define RES_ENET_CSR   0
+#define RES_RING_CSR   1
+#define RES_RING_CMD   2
+
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
        struct xgene_enet_raw_desc16 *raw_desc;
@@ -369,6 +373,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
                if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
                        break;
 
+               /* read fpqnum field after dataaddr field */
+               dma_rmb();
                if (is_rx_desc(raw_desc))
                        ret = xgene_enet_rx_frame(ring, raw_desc);
                else
@@ -746,6 +752,41 @@ static const struct net_device_ops xgene_ndev_ops = {
        .ndo_set_mac_address = xgene_enet_set_mac_address,
 };
 
+static int xgene_get_mac_address(struct device *dev,
+                                unsigned char *addr)
+{
+       int ret;
+
+       ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
+       if (ret)
+               ret = device_property_read_u8_array(dev, "mac-address",
+                                                   addr, 6);
+       if (ret)
+               return -ENODEV;
+
+       return ETH_ALEN;
+}
+
+static int xgene_get_phy_mode(struct device *dev)
+{
+       int i, ret;
+       char *modestr;
+
+       ret = device_property_read_string(dev, "phy-connection-type",
+                                         (const char **)&modestr);
+       if (ret)
+               ret = device_property_read_string(dev, "phy-mode",
+                                                 (const char **)&modestr);
+       if (ret)
+               return -ENODEV;
+
+       for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
+               if (!strcasecmp(modestr, phy_modes(i)))
+                       return i;
+       }
+       return -ENODEV;
+}
+
 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 {
        struct platform_device *pdev;
@@ -753,32 +794,45 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        struct device *dev;
        struct resource *res;
        void __iomem *base_addr;
-       const char *mac;
        int ret;
 
        pdev = pdata->pdev;
        dev = &pdev->dev;
        ndev = pdata->ndev;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
-       pdata->base_addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pdata->base_addr)) {
+       res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
+       if (!res) {
+               dev_err(dev, "Resource enet_csr not defined\n");
+               return -ENODEV;
+       }
+       pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
+       if (!pdata->base_addr) {
                dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
-               return PTR_ERR(pdata->base_addr);
+               return -ENOMEM;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
-       pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pdata->ring_csr_addr)) {
+       res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
+       if (!res) {
+               dev_err(dev, "Resource ring_csr not defined\n");
+               return -ENODEV;
+       }
+       pdata->ring_csr_addr = devm_ioremap(dev, res->start,
+                                                       resource_size(res));
+       if (!pdata->ring_csr_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
-               return PTR_ERR(pdata->ring_csr_addr);
+               return -ENOMEM;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
-       pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pdata->ring_cmd_addr)) {
+       res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
+       if (!res) {
+               dev_err(dev, "Resource ring_cmd not defined\n");
+               return -ENODEV;
+       }
+       pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
+                                                       resource_size(res));
+       if (!pdata->ring_cmd_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring command region\n");
-               return PTR_ERR(pdata->ring_cmd_addr);
+               return -ENOMEM;
        }
 
        ret = platform_get_irq(pdev, 0);
@@ -789,14 +843,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        }
        pdata->rx_irq = ret;
 
-       mac = of_get_mac_address(dev->of_node);
-       if (mac)
-               memcpy(ndev->dev_addr, mac, ndev->addr_len);
-       else
+       if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
                eth_hw_addr_random(ndev);
+
        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
 
-       pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+       pdata->phy_mode = xgene_get_phy_mode(dev);
        if (pdata->phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return pdata->phy_mode;
@@ -809,11 +861,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        }
 
        pdata->clk = devm_clk_get(&pdev->dev, NULL);
-       ret = IS_ERR(pdata->clk);
        if (IS_ERR(pdata->clk)) {
-               dev_err(&pdev->dev, "can't get clock\n");
-               ret = PTR_ERR(pdata->clk);
-               return ret;
+               /* Firmware may have set up the clock already. */
+               pdata->clk = NULL;
        }
 
        base_addr = pdata->base_addr;
@@ -924,7 +974,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
                goto err;
        }
 
-       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                netdev_err(ndev, "No usable DMA configuration\n");
                goto err;
@@ -972,17 +1022,26 @@ static int xgene_enet_remove(struct platform_device *pdev)
        return 0;
 }
 
-static struct of_device_id xgene_enet_match[] = {
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+       { "APMC0D05", },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static struct of_device_id xgene_enet_of_match[] = {
        {.compatible = "apm,xgene-enet",},
        {},
 };
 
-MODULE_DEVICE_TABLE(of, xgene_enet_match);
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
 
 static struct platform_driver xgene_enet_driver = {
        .driver = {
                   .name = "xgene-enet",
-                  .of_match_table = xgene_enet_match,
+                  .of_match_table = of_match_ptr(xgene_enet_of_match),
+                  .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
        },
        .probe = xgene_enet_probe,
        .remove = xgene_enet_remove,
index f9958fae6ffdc9fcba7cbb8f8438d95024b40ac9..c2d465c3db66b15eaf7ddf313106605a997fb170 100644 (file)
 #ifndef __XGENE_ENET_MAIN_H__
 #define __XGENE_ENET_MAIN_H__
 
+#include <linux/acpi.h>
 #include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/io.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
 #include <linux/of_mdio.h>
index e398eda0729832671561490c6d18fce9db485f5e..c8af3ce3ea38d16d4c470ec5773b2d4f088b0f15 100644 (file)
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
        schedule_work(&alx->reset_wk);
 }
 
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
 {
        struct alx_rx_queue *rxq = &alx->rxq;
        struct alx_rrd *rrd;
        struct alx_buffer *rxb;
        struct sk_buff *skb;
        u16 length, rfd_cleaned = 0;
+       int work = 0;
 
-       while (budget > 0) {
+       while (work < budget) {
                rrd = &rxq->rrd[rxq->rrd_read_idx];
                if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
                        break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
                                  RRD_NOR) != 1) {
                        alx_schedule_reset(alx);
-                       return 0;
+                       return work;
                }
 
                rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                }
 
                napi_gro_receive(&alx->napi, skb);
-               budget--;
+               work++;
 
 next_pkt:
                if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
        if (rfd_cleaned)
                alx_refill_rx_ring(alx, GFP_ATOMIC);
 
-       return budget > 0;
+       return work;
 }
 
 static int alx_poll(struct napi_struct *napi, int budget)
 {
        struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
        struct alx_hw *hw = &alx->hw;
-       bool complete = true;
        unsigned long flags;
+       bool tx_complete;
+       int work;
 
-       complete = alx_clean_tx_irq(alx) &&
-                  alx_clean_rx_irq(alx, budget);
+       tx_complete = alx_clean_tx_irq(alx);
+       work = alx_clean_rx_irq(alx, budget);
 
-       if (!complete)
-               return 1;
+       if (!tx_complete || work == budget)
+               return budget;
 
        napi_complete(&alx->napi);
 
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
 
        alx_post_write(hw);
 
-       return 0;
+       return work;
 }
 
 static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
index c9946c6c119e31f24b50a4337bdb770826ae8cae..587f63e87588f73a3e310066f3fd7494d1259e3f 100644 (file)
@@ -2235,8 +2235,8 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       if (unlikely(vlan_tx_tag_present(skb))) {
-               u16 vlan = vlan_tx_tag_get(skb);
+       if (unlikely(skb_vlan_tag_present(skb))) {
+               u16 vlan = skb_vlan_tag_get(skb);
                __le16 tag;
 
                vlan = cpu_to_le16(vlan);
index c88abf5b64154c90dbcaeb536581b8da839b1713..59a03a193e8354285f838f6f47c3bc081cc18bdf 100644 (file)
@@ -1892,8 +1892,8 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
 
        tpd = atl1e_get_tpd(adapter);
 
-       if (vlan_tx_tag_present(skb)) {
-               u16 vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               u16 vlan_tag = skb_vlan_tag_get(skb);
                u16 atl1e_vlan_tag;
 
                tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
index 2c8f398aeda9228dc65255cd642759b8e557b256..eca1d113fee187ccb310fbd6c3ba2c4b535a9114 100644 (file)
@@ -2415,8 +2415,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
                (u16) atomic_read(&tpd_ring->next_to_use));
        memset(ptpd, 0, sizeof(struct tx_packet_desc));
 
-       if (vlan_tx_tag_present(skb)) {
-               vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               vlan_tag = skb_vlan_tag_get(skb);
                vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
                        ((vlan_tag >> 9) & 0x8);
                ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
index 482a7cabb0a1421802e66dce856ea94925c0dd15..46a535318c7af64ea586baf230067f9e2d4ade5b 100644 (file)
@@ -887,8 +887,8 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
                offset = ((u32)(skb->len-copy_len + 3) & ~3);
        }
 #ifdef NETIF_F_HW_VLAN_CTAG_TX
-       if (vlan_tx_tag_present(skb)) {
-               u16 vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               u16 vlan_tag = skb_vlan_tag_get(skb);
                vlan_tag = (vlan_tag << 4) |
                        (vlan_tag >> 13) |
                        ((vlan_tag >> 9) & 0x8);
index 05c6af6c418fa45690d085885b0cca330b58c210..3007d95fbb9f69c460a83d57bc66c7488421a627 100644 (file)
@@ -1167,10 +1167,10 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
                bgmac->int_status = 0;
        }
 
-       if (handled < weight)
+       if (handled < weight) {
                napi_complete(napi);
-
-       bgmac_chip_intrs_on(bgmac);
+               bgmac_chip_intrs_on(bgmac);
+       }
 
        return handled;
 }
@@ -1515,6 +1515,8 @@ static int bgmac_probe(struct bcma_device *core)
        if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
                bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
 
+       netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+
        err = bgmac_mii_register(bgmac);
        if (err) {
                bgmac_err(bgmac, "Cannot register MDIO\n");
@@ -1529,8 +1531,6 @@ static int bgmac_probe(struct bcma_device *core)
 
        netif_carrier_off(net_dev);
 
-       netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
-
        return 0;
 
 err_mii_unregister:
@@ -1549,9 +1549,9 @@ static void bgmac_remove(struct bcma_device *core)
 {
        struct bgmac *bgmac = bcma_get_drvdata(core);
 
-       netif_napi_del(&bgmac->napi);
        unregister_netdev(bgmac->net_dev);
        bgmac_mii_unregister(bgmac);
+       netif_napi_del(&bgmac->napi);
        bgmac_dma_free(bgmac);
        bcma_set_drvdata(core, NULL);
        free_netdev(bgmac->net_dev);
index 823d01c5684caf3373d51ee09fe615a0e8e7a07b..02bf0b86995b8e5caac66cffd174208349fce519 100644 (file)
@@ -6597,9 +6597,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags |=
-                       (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
+                       (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
        }
 
        if ((mss = skb_shinfo(skb)->gso_size)) {
index 792ba72fb5c815cded9d20cc01024e7c7678f380..756053c028becff93c62cfcdbd6e1f635594cc01 100644 (file)
@@ -1138,12 +1138,8 @@ struct bnx2x_port {
        u32                     link_config[LINK_CONFIG_SIZE];
 
        u32                     supported[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define SUPPORTED_2500baseX_Full       (1 << 15)
 
        u32                     advertising[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define ADVERTISED_2500baseX_Full      (1 << 15)
 
        u32                     phy_addr;
 
index 1d1147c93d5972147a9aa17650eeaadb0dda7066..0a9faa134a9aed3eda559f81a9f5ab12324925db 100644 (file)
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
                }
 #endif
                if (!bnx2x_fp_lock_napi(fp))
-                       return work_done;
+                       return budget;
 
                for_each_cos_in_tx_queue(fp, cos)
                        if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
@@ -3865,9 +3865,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
           pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_start_bd->vlan_or_ethertype =
-                   cpu_to_le16(vlan_tx_tag_get(skb));
+                   cpu_to_le16(skb_vlan_tag_get(skb));
                tx_start_bd->bd_flags.as_bitfield |=
                    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
        } else {
index 0758c8bef4ba5b8e24cb9388639ed21cef0df5a3..7155e1d2c208c7253b846ee954086fef5fd574da 100644 (file)
@@ -9169,7 +9169,7 @@ static void bnx2x_disable_ptp(struct bnx2x *bp)
 }
 
 /* Called during unload, to stop PTP-related stuff */
-void bnx2x_stop_ptp(struct bnx2x *bp)
+static void bnx2x_stop_ptp(struct bnx2x *bp)
 {
        /* Cancel PTP work queue. Should be done after the Tx queues are
         * drained to prevent additional scheduling.
@@ -13318,7 +13318,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
        return -ENOTSUPP;
 }
 
-void bnx2x_register_phc(struct bnx2x *bp)
+static void bnx2x_register_phc(struct bnx2x *bp)
 {
        /* Fill the ptp_clock_info struct and register PTP clock*/
        bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14635,7 +14635,7 @@ static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
        return bnx2x_func_state_change(bp, &func_params);
 }
 
-int bnx2x_enable_ptp_packets(struct bnx2x *bp)
+static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
 {
        struct bnx2x_queue_state_params q_params;
        int rc, i;
index 553dcd8a9df29f64108285c1f520635e287f7ca8..615a6dbde04797651d29e566abfc64b82e1cae1f 100644 (file)
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
 }
 
 static void tg3_irq_quiesce(struct tg3 *tp)
+       __releases(tp->lock)
+       __acquires(tp->lock)
 {
        int i;
 
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
        tp->irq_sync = 1;
        smp_mb();
 
+       spin_unlock_bh(&tp->lock);
+
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
+
+       spin_lock_bh(&tp->lock);
 }
 
 /* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -8002,9 +8008,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
-               vlan = vlan_tx_tag_get(skb);
+               vlan = skb_vlan_tag_get(skb);
        }
 
        if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
 
 /* tp->lock is held. */
 static int tg3_chip_reset(struct tg3 *tp)
+       __releases(tp->lock)
+       __acquires(tp->lock)
 {
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
        }
        smp_mb();
 
+       tg3_full_unlock(tp);
+
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
 
+       tg3_full_lock(tp, 0);
+
        if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
 {
        struct tg3 *tp = (struct tg3 *) __opaque;
 
-       if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
-               goto restart_timer;
-
        spin_lock(&tp->lock);
 
+       if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+               spin_unlock(&tp->lock);
+               goto restart_timer;
+       }
+
        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;
 
+       rtnl_lock();
        tg3_full_lock(tp, 0);
 
        if (!netif_running(tp->dev)) {
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                tg3_full_unlock(tp);
+               rtnl_unlock();
                return;
        }
 
@@ -11138,6 +11154,7 @@ out:
                tg3_phy_start(tp);
 
        tg3_flag_clear(tp, RESET_TASK_PENDING);
+       rtnl_unlock();
 }
 
 static int tg3_request_irq(struct tg3 *tp, int irq_num)
@@ -11556,11 +11573,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
        tg3_flag_set(tp, INIT_COMPLETE);
        tg3_enable_ints(tp);
 
-       if (init)
-               tg3_ptp_init(tp);
-       else
-               tg3_ptp_resume(tp);
-
+       tg3_ptp_resume(tp);
 
        tg3_full_unlock(tp);
 
@@ -11681,13 +11694,6 @@ static int tg3_open(struct net_device *dev)
                pci_set_power_state(tp->pdev, PCI_D3hot);
        }
 
-       if (tg3_flag(tp, PTP_CAPABLE)) {
-               tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
-                                                  &tp->pdev->dev);
-               if (IS_ERR(tp->ptp_clock))
-                       tp->ptp_clock = NULL;
-       }
-
        return err;
 }
 
@@ -11701,8 +11707,6 @@ static int tg3_close(struct net_device *dev)
                return -EAGAIN;
        }
 
-       tg3_ptp_fini(tp);
-
        tg3_stop(tp);
 
        /* Clear stats across close / open calls */
@@ -17880,6 +17884,14 @@ static int tg3_init_one(struct pci_dev *pdev,
                goto err_out_apeunmap;
        }
 
+       if (tg3_flag(tp, PTP_CAPABLE)) {
+               tg3_ptp_init(tp);
+               tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
+                                                  &tp->pdev->dev);
+               if (IS_ERR(tp->ptp_clock))
+                       tp->ptp_clock = NULL;
+       }
+
        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
@@ -17955,6 +17967,8 @@ static void tg3_remove_one(struct pci_dev *pdev)
        if (dev) {
                struct tg3 *tp = netdev_priv(dev);
 
+               tg3_ptp_fini(tp);
+
                release_firmware(tp->fw);
 
                tg3_reset_task_cancel(tp);
index 323721838cf93e7d75e498cd98ab2cf1d78a3233..7714d7790089cc1c35aa37511d07ccce7d27dfa3 100644 (file)
@@ -2824,8 +2824,8 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
        u32 gso_size;
        u16 vlan_tag = 0;
 
-       if (vlan_tx_tag_present(skb)) {
-               vlan_tag = (u16)vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               vlan_tag = (u16)skb_vlan_tag_get(skb);
                flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
        }
        if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
index 55eb7f2af2b41ccf031f5018c12137daf752dc6c..7ef55f5fa664480ce052720bc55bd5ffb9ca8b57 100644 (file)
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
                res = PTR_ERR(lp->pclk);
                goto err_free_dev;
        }
-       clk_enable(lp->pclk);
+       clk_prepare_enable(lp->pclk);
 
        lp->hclk = ERR_PTR(-ENOENT);
        lp->tx_clk = ERR_PTR(-ENOENT);
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
 err_out_unregister_netdev:
        unregister_netdev(dev);
 err_disable_clock:
-       clk_disable(lp->pclk);
+       clk_disable_unprepare(lp->pclk);
 err_free_dev:
        free_netdev(dev);
        return res;
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
        kfree(lp->mii_bus->irq);
        mdiobus_free(lp->mii_bus);
        unregister_netdev(dev);
-       clk_disable(lp->pclk);
+       clk_disable_unprepare(lp->pclk);
        free_netdev(dev);
 
        return 0;
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
                netif_stop_queue(net_dev);
                netif_device_detach(net_dev);
 
-               clk_disable(lp->pclk);
+               clk_disable_unprepare(lp->pclk);
        }
        return 0;
 }
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
        struct macb *lp = netdev_priv(net_dev);
 
        if (netif_running(net_dev)) {
-               clk_enable(lp->pclk);
+               clk_prepare_enable(lp->pclk);
 
                netif_device_attach(net_dev);
                netif_start_queue(net_dev);
index 3767271c7667f69ef212fb2cb31105910121b516..ad76b8e35a00e188e39d00f4c5f70d97c3df5363 100644 (file)
@@ -1691,7 +1691,7 @@ static int hash_get_index(__u8 *addr)
 
        for (j = 0; j < 6; j++) {
                for (i = 0, bitval = 0; i < 8; i++)
-                       bitval ^= hash_bit_value(i*6 + j, addr);
+                       bitval ^= hash_bit_value(i * 6 + j, addr);
 
                hash_index |= (bitval << j);
        }
@@ -1827,12 +1827,23 @@ static int macb_close(struct net_device *dev)
 
 static void gem_update_stats(struct macb *bp)
 {
-       u32 __iomem *reg = bp->regs + GEM_OTX;
+       int i;
        u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
-       u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
 
-       for (; p < end; p++, reg++)
-               *p += __raw_readl(reg);
+       for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
+               u32 offset = gem_statistics[i].offset;
+               u64 val = __raw_readl(bp->regs + offset);
+
+               bp->ethtool_stats[i] += val;
+               *p += val;
+
+               if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
+                       /* Add GEM_OCTTXH, GEM_OCTRXH */
+                       val = __raw_readl(bp->regs + offset + 4);
+                       bp->ethtool_stats[i] += ((u64)val) << 32;
+                       *(++p) += val;
+               }
+       }
 }
 
 static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -1873,6 +1884,39 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
        return nstat;
 }
 
+static void gem_get_ethtool_stats(struct net_device *dev,
+                                 struct ethtool_stats *stats, u64 *data)
+{
+       struct macb *bp;
+
+       bp = netdev_priv(dev);
+       gem_update_stats(bp);
+       memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+}
+
+static int gem_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return GEM_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
+{
+       int i;
+
+       switch (sset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
+                       memcpy(p, gem_statistics[i].stat_string,
+                              ETH_GSTRING_LEN);
+               break;
+       }
+}
+
 struct net_device_stats *macb_get_stats(struct net_device *dev)
 {
        struct macb *bp = netdev_priv(dev);
@@ -1991,6 +2035,18 @@ const struct ethtool_ops macb_ethtool_ops = {
 };
 EXPORT_SYMBOL_GPL(macb_ethtool_ops);
 
+static const struct ethtool_ops gem_ethtool_ops = {
+       .get_settings           = macb_get_settings,
+       .set_settings           = macb_set_settings,
+       .get_regs_len           = macb_get_regs_len,
+       .get_regs               = macb_get_regs,
+       .get_link               = ethtool_op_get_link,
+       .get_ts_info            = ethtool_op_get_ts_info,
+       .get_ethtool_stats      = gem_get_ethtool_stats,
+       .get_strings            = gem_get_ethtool_strings,
+       .get_sset_count         = gem_get_sset_count,
+};
+
 int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct macb *bp = netdev_priv(dev);
@@ -2148,7 +2204,7 @@ static void macb_probe_queues(void __iomem *mem,
                        (*num_queues)++;
 }
 
-static int __init macb_probe(struct platform_device *pdev)
+static int macb_probe(struct platform_device *pdev)
 {
        struct macb_platform_data *pdata;
        struct resource *regs;
@@ -2278,7 +2334,6 @@ static int __init macb_probe(struct platform_device *pdev)
 
        dev->netdev_ops = &macb_netdev_ops;
        netif_napi_add(dev, &bp->napi, macb_poll, 64);
-       dev->ethtool_ops = &macb_ethtool_ops;
 
        dev->base_addr = regs->start;
 
@@ -2292,12 +2347,14 @@ static int __init macb_probe(struct platform_device *pdev)
                bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
                bp->macbgem_ops.mog_init_rings = gem_init_rings;
                bp->macbgem_ops.mog_rx = gem_rx;
+               dev->ethtool_ops = &gem_ethtool_ops;
        } else {
                bp->max_tx_length = MACB_MAX_TX_LEN;
                bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
                bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
                bp->macbgem_ops.mog_init_rings = macb_init_rings;
                bp->macbgem_ops.mog_rx = macb_rx;
+               dev->ethtool_ops = &macb_ethtool_ops;
        }
 
        /* Set features */
@@ -2386,7 +2443,7 @@ err_out:
        return err;
 }
 
-static int __exit macb_remove(struct platform_device *pdev)
+static int macb_remove(struct platform_device *pdev)
 {
        struct net_device *dev;
        struct macb *bp;
@@ -2411,8 +2468,7 @@ static int __exit macb_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int macb_suspend(struct device *dev)
+static int __maybe_unused macb_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct net_device *netdev = platform_get_drvdata(pdev);
@@ -2429,7 +2485,7 @@ static int macb_suspend(struct device *dev)
        return 0;
 }
 
-static int macb_resume(struct device *dev)
+static int __maybe_unused macb_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct net_device *netdev = platform_get_drvdata(pdev);
@@ -2444,12 +2500,12 @@ static int macb_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
 
 static struct platform_driver macb_driver = {
-       .remove         = __exit_p(macb_remove),
+       .probe          = macb_probe,
+       .remove         = macb_remove,
        .driver         = {
                .name           = "macb",
                .of_match_table = of_match_ptr(macb_dt_ids),
@@ -2457,7 +2513,7 @@ static struct platform_driver macb_driver = {
        },
 };
 
-module_platform_driver_probe(macb_driver, macb_probe);
+module_platform_driver(macb_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
index 084191b6fad2065331947c2b1d377ae943a545d1..31dc080f2437b6b02dde206b1e7c5343100fdde9 100644 (file)
 #define MACB_MAX_QUEUES 8
 
 /* MACB register offsets */
-#define MACB_NCR                               0x0000
-#define MACB_NCFGR                             0x0004
-#define MACB_NSR                               0x0008
-#define MACB_TAR                               0x000c /* AT91RM9200 only */
-#define MACB_TCR                               0x0010 /* AT91RM9200 only */
-#define MACB_TSR                               0x0014
-#define MACB_RBQP                              0x0018
-#define MACB_TBQP                              0x001c
-#define MACB_RSR                               0x0020
-#define MACB_ISR                               0x0024
-#define MACB_IER                               0x0028
-#define MACB_IDR                               0x002c
-#define MACB_IMR                               0x0030
-#define MACB_MAN                               0x0034
-#define MACB_PTR                               0x0038
-#define MACB_PFR                               0x003c
-#define MACB_FTO                               0x0040
-#define MACB_SCF                               0x0044
-#define MACB_MCF                               0x0048
-#define MACB_FRO                               0x004c
-#define MACB_FCSE                              0x0050
-#define MACB_ALE                               0x0054
-#define MACB_DTF                               0x0058
-#define MACB_LCOL                              0x005c
-#define MACB_EXCOL                             0x0060
-#define MACB_TUND                              0x0064
-#define MACB_CSE                               0x0068
-#define MACB_RRE                               0x006c
-#define MACB_ROVR                              0x0070
-#define MACB_RSE                               0x0074
-#define MACB_ELE                               0x0078
-#define MACB_RJA                               0x007c
-#define MACB_USF                               0x0080
-#define MACB_STE                               0x0084
-#define MACB_RLE                               0x0088
-#define MACB_TPF                               0x008c
-#define MACB_HRB                               0x0090
-#define MACB_HRT                               0x0094
-#define MACB_SA1B                              0x0098
-#define MACB_SA1T                              0x009c
-#define MACB_SA2B                              0x00a0
-#define MACB_SA2T                              0x00a4
-#define MACB_SA3B                              0x00a8
-#define MACB_SA3T                              0x00ac
-#define MACB_SA4B                              0x00b0
-#define MACB_SA4T                              0x00b4
-#define MACB_TID                               0x00b8
-#define MACB_TPQ                               0x00bc
-#define MACB_USRIO                             0x00c0
-#define MACB_WOL                               0x00c4
-#define MACB_MID                               0x00fc
+#define MACB_NCR               0x0000 /* Network Control */
+#define MACB_NCFGR             0x0004 /* Network Config */
+#define MACB_NSR               0x0008 /* Network Status */
+#define MACB_TAR               0x000c /* AT91RM9200 only */
+#define MACB_TCR               0x0010 /* AT91RM9200 only */
+#define MACB_TSR               0x0014 /* Transmit Status */
+#define MACB_RBQP              0x0018 /* RX Q Base Address */
+#define MACB_TBQP              0x001c /* TX Q Base Address */
+#define MACB_RSR               0x0020 /* Receive Status */
+#define MACB_ISR               0x0024 /* Interrupt Status */
+#define MACB_IER               0x0028 /* Interrupt Enable */
+#define MACB_IDR               0x002c /* Interrupt Disable */
+#define MACB_IMR               0x0030 /* Interrupt Mask */
+#define MACB_MAN               0x0034 /* PHY Maintenance */
+#define MACB_PTR               0x0038
+#define MACB_PFR               0x003c
+#define MACB_FTO               0x0040
+#define MACB_SCF               0x0044
+#define MACB_MCF               0x0048
+#define MACB_FRO               0x004c
+#define MACB_FCSE              0x0050
+#define MACB_ALE               0x0054
+#define MACB_DTF               0x0058
+#define MACB_LCOL              0x005c
+#define MACB_EXCOL             0x0060
+#define MACB_TUND              0x0064
+#define MACB_CSE               0x0068
+#define MACB_RRE               0x006c
+#define MACB_ROVR              0x0070
+#define MACB_RSE               0x0074
+#define MACB_ELE               0x0078
+#define MACB_RJA               0x007c
+#define MACB_USF               0x0080
+#define MACB_STE               0x0084
+#define MACB_RLE               0x0088
+#define MACB_TPF               0x008c
+#define MACB_HRB               0x0090
+#define MACB_HRT               0x0094
+#define MACB_SA1B              0x0098
+#define MACB_SA1T              0x009c
+#define MACB_SA2B              0x00a0
+#define MACB_SA2T              0x00a4
+#define MACB_SA3B              0x00a8
+#define MACB_SA3T              0x00ac
+#define MACB_SA4B              0x00b0
+#define MACB_SA4T              0x00b4
+#define MACB_TID               0x00b8
+#define MACB_TPQ               0x00bc
+#define MACB_USRIO             0x00c0
+#define MACB_WOL               0x00c4
+#define MACB_MID               0x00fc
 
 /* GEM register offsets. */
-#define GEM_NCFGR                              0x0004
-#define GEM_USRIO                              0x000c
-#define GEM_DMACFG                             0x0010
-#define GEM_HRB                                        0x0080
-#define GEM_HRT                                        0x0084
-#define GEM_SA1B                               0x0088
-#define GEM_SA1T                               0x008C
-#define GEM_SA2B                               0x0090
-#define GEM_SA2T                               0x0094
-#define GEM_SA3B                               0x0098
-#define GEM_SA3T                               0x009C
-#define GEM_SA4B                               0x00A0
-#define GEM_SA4T                               0x00A4
-#define GEM_OTX                                        0x0100
-#define GEM_DCFG1                              0x0280
-#define GEM_DCFG2                              0x0284
-#define GEM_DCFG3                              0x0288
-#define GEM_DCFG4                              0x028c
-#define GEM_DCFG5                              0x0290
-#define GEM_DCFG6                              0x0294
-#define GEM_DCFG7                              0x0298
-
-#define GEM_ISR(hw_q)                          (0x0400 + ((hw_q) << 2))
-#define GEM_TBQP(hw_q)                         (0x0440 + ((hw_q) << 2))
-#define GEM_RBQP(hw_q)                         (0x0480 + ((hw_q) << 2))
-#define GEM_IER(hw_q)                          (0x0600 + ((hw_q) << 2))
-#define GEM_IDR(hw_q)                          (0x0620 + ((hw_q) << 2))
-#define GEM_IMR(hw_q)                          (0x0640 + ((hw_q) << 2))
+#define GEM_NCFGR              0x0004 /* Network Config */
+#define GEM_USRIO              0x000c /* User IO */
+#define GEM_DMACFG             0x0010 /* DMA Configuration */
+#define GEM_HRB                        0x0080 /* Hash Bottom */
+#define GEM_HRT                        0x0084 /* Hash Top */
+#define GEM_SA1B               0x0088 /* Specific1 Bottom */
+#define GEM_SA1T               0x008C /* Specific1 Top */
+#define GEM_SA2B               0x0090 /* Specific2 Bottom */
+#define GEM_SA2T               0x0094 /* Specific2 Top */
+#define GEM_SA3B               0x0098 /* Specific3 Bottom */
+#define GEM_SA3T               0x009C /* Specific3 Top */
+#define GEM_SA4B               0x00A0 /* Specific4 Bottom */
+#define GEM_SA4T               0x00A4 /* Specific4 Top */
+#define GEM_OTX                        0x0100 /* Octets transmitted */
+#define GEM_OCTTXL             0x0100 /* Octets transmitted [31:0] */
+#define GEM_OCTTXH             0x0104 /* Octets transmitted [47:32] */
+#define GEM_TXCNT              0x0108 /* Frames Transmitted counter */
+#define GEM_TXBCCNT            0x010c /* Broadcast Frames counter */
+#define GEM_TXMCCNT            0x0110 /* Multicast Frames counter */
+#define GEM_TXPAUSECNT         0x0114 /* Pause Frames Transmitted Counter */
+#define GEM_TX64CNT            0x0118 /* 64 byte Frames TX counter */
+#define GEM_TX65CNT            0x011c /* 65-127 byte Frames TX counter */
+#define GEM_TX128CNT           0x0120 /* 128-255 byte Frames TX counter */
+#define GEM_TX256CNT           0x0124 /* 256-511 byte Frames TX counter */
+#define GEM_TX512CNT           0x0128 /* 512-1023 byte Frames TX counter */
+#define GEM_TX1024CNT          0x012c /* 1024-1518 byte Frames TX counter */
+#define GEM_TX1519CNT          0x0130 /* 1519+ byte Frames TX counter */
+#define GEM_TXURUNCNT          0x0134 /* TX under run error counter */
+#define GEM_SNGLCOLLCNT                0x0138 /* Single Collision Frame Counter */
+#define GEM_MULTICOLLCNT       0x013c /* Multiple Collision Frame Counter */
+#define GEM_EXCESSCOLLCNT      0x0140 /* Excessive Collision Frame Counter */
+#define GEM_LATECOLLCNT                0x0144 /* Late Collision Frame Counter */
+#define GEM_TXDEFERCNT         0x0148 /* Deferred Transmission Frame Counter */
+#define GEM_TXCSENSECNT                0x014c /* Carrier Sense Error Counter */
+#define GEM_ORX                        0x0150 /* Octets received */
+#define GEM_OCTRXL             0x0150 /* Octets received [31:0] */
+#define GEM_OCTRXH             0x0154 /* Octets received [47:32] */
+#define GEM_RXCNT              0x0158 /* Frames Received Counter */
+#define GEM_RXBROADCNT         0x015c /* Broadcast Frames Received Counter */
+#define GEM_RXMULTICNT         0x0160 /* Multicast Frames Received Counter */
+#define GEM_RXPAUSECNT         0x0164 /* Pause Frames Received Counter */
+#define GEM_RX64CNT            0x0168 /* 64 byte Frames RX Counter */
+#define GEM_RX65CNT            0x016c /* 65-127 byte Frames RX Counter */
+#define GEM_RX128CNT           0x0170 /* 128-255 byte Frames RX Counter */
+#define GEM_RX256CNT           0x0174 /* 256-511 byte Frames RX Counter */
+#define GEM_RX512CNT           0x0178 /* 512-1023 byte Frames RX Counter */
+#define GEM_RX1024CNT          0x017c /* 1024-1518 byte Frames RX Counter */
+#define GEM_RX1519CNT          0x0180 /* 1519+ byte Frames RX Counter */
+#define GEM_RXUNDRCNT          0x0184 /* Undersize Frames Received Counter */
+#define GEM_RXOVRCNT           0x0188 /* Oversize Frames Received Counter */
+#define GEM_RXJABCNT           0x018c /* Jabbers Received Counter */
+#define GEM_RXFCSCNT           0x0190 /* Frame Check Sequence Error Counter */
+#define GEM_RXLENGTHCNT                0x0194 /* Length Field Error Counter */
+#define GEM_RXSYMBCNT          0x0198 /* Symbol Error Counter */
+#define GEM_RXALIGNCNT         0x019c /* Alignment Error Counter */
+#define GEM_RXRESERRCNT                0x01a0 /* Receive Resource Error Counter */
+#define GEM_RXORCNT            0x01a4 /* Receive Overrun Counter */
+#define GEM_RXIPCCNT           0x01a8 /* IP header Checksum Error Counter */
+#define GEM_RXTCPCCNT          0x01ac /* TCP Checksum Error Counter */
+#define GEM_RXUDPCCNT          0x01b0 /* UDP Checksum Error Counter */
+#define GEM_DCFG1              0x0280 /* Design Config 1 */
+#define GEM_DCFG2              0x0284 /* Design Config 2 */
+#define GEM_DCFG3              0x0288 /* Design Config 3 */
+#define GEM_DCFG4              0x028c /* Design Config 4 */
+#define GEM_DCFG5              0x0290 /* Design Config 5 */
+#define GEM_DCFG6              0x0294 /* Design Config 6 */
+#define GEM_DCFG7              0x0298 /* Design Config 7 */
+
+#define GEM_ISR(hw_q)          (0x0400 + ((hw_q) << 2))
+#define GEM_TBQP(hw_q)         (0x0440 + ((hw_q) << 2))
+#define GEM_RBQP(hw_q)         (0x0480 + ((hw_q) << 2))
+#define GEM_IER(hw_q)          (0x0600 + ((hw_q) << 2))
+#define GEM_IDR(hw_q)          (0x0620 + ((hw_q) << 2))
+#define GEM_IMR(hw_q)          (0x0640 + ((hw_q) << 2))
 
 /* Bitfields in NCR */
-#define MACB_LB_OFFSET                         0
-#define MACB_LB_SIZE                           1
-#define MACB_LLB_OFFSET                                1
-#define MACB_LLB_SIZE                          1
-#define MACB_RE_OFFSET                         2
-#define MACB_RE_SIZE                           1
-#define MACB_TE_OFFSET                         3
-#define MACB_TE_SIZE                           1
-#define MACB_MPE_OFFSET                                4
-#define MACB_MPE_SIZE                          1
-#define MACB_CLRSTAT_OFFSET                    5
-#define MACB_CLRSTAT_SIZE                      1
-#define MACB_INCSTAT_OFFSET                    6
-#define MACB_INCSTAT_SIZE                      1
-#define MACB_WESTAT_OFFSET                     7
-#define MACB_WESTAT_SIZE                       1
-#define MACB_BP_OFFSET                         8
-#define MACB_BP_SIZE                           1
-#define MACB_TSTART_OFFSET                     9
-#define MACB_TSTART_SIZE                       1
-#define MACB_THALT_OFFSET                      10
-#define MACB_THALT_SIZE                                1
-#define MACB_NCR_TPF_OFFSET                    11
-#define MACB_NCR_TPF_SIZE                      1
-#define MACB_TZQ_OFFSET                                12
-#define MACB_TZQ_SIZE                          1
+#define MACB_LB_OFFSET         0 /* reserved */
+#define MACB_LB_SIZE           1
+#define MACB_LLB_OFFSET                1 /* Loop back local */
+#define MACB_LLB_SIZE          1
+#define MACB_RE_OFFSET         2 /* Receive enable */
+#define MACB_RE_SIZE           1
+#define MACB_TE_OFFSET         3 /* Transmit enable */
+#define MACB_TE_SIZE           1
+#define MACB_MPE_OFFSET                4 /* Management port enable */
+#define MACB_MPE_SIZE          1
+#define MACB_CLRSTAT_OFFSET    5 /* Clear stats regs */
+#define MACB_CLRSTAT_SIZE      1
+#define MACB_INCSTAT_OFFSET    6 /* Incremental stats regs */
+#define MACB_INCSTAT_SIZE      1
+#define MACB_WESTAT_OFFSET     7 /* Write enable stats regs */
+#define MACB_WESTAT_SIZE       1
+#define MACB_BP_OFFSET         8 /* Back pressure */
+#define MACB_BP_SIZE           1
+#define MACB_TSTART_OFFSET     9 /* Start transmission */
+#define MACB_TSTART_SIZE       1
+#define MACB_THALT_OFFSET      10 /* Transmit halt */
+#define MACB_THALT_SIZE                1
+#define MACB_NCR_TPF_OFFSET    11 /* Transmit pause frame */
+#define MACB_NCR_TPF_SIZE      1
+#define MACB_TZQ_OFFSET                12 /* Transmit zero quantum pause frame */
+#define MACB_TZQ_SIZE          1
 
 /* Bitfields in NCFGR */
-#define MACB_SPD_OFFSET                                0
-#define MACB_SPD_SIZE                          1
-#define MACB_FD_OFFSET                         1
-#define MACB_FD_SIZE                           1
-#define MACB_BIT_RATE_OFFSET                   2
-#define MACB_BIT_RATE_SIZE                     1
-#define MACB_JFRAME_OFFSET                     3
-#define MACB_JFRAME_SIZE                       1
-#define MACB_CAF_OFFSET                                4
-#define MACB_CAF_SIZE                          1
-#define MACB_NBC_OFFSET                                5
-#define MACB_NBC_SIZE                          1
-#define MACB_NCFGR_MTI_OFFSET                  6
-#define MACB_NCFGR_MTI_SIZE                    1
-#define MACB_UNI_OFFSET                                7
-#define MACB_UNI_SIZE                          1
-#define MACB_BIG_OFFSET                                8
-#define MACB_BIG_SIZE                          1
-#define MACB_EAE_OFFSET                                9
-#define MACB_EAE_SIZE                          1
-#define MACB_CLK_OFFSET                                10
-#define MACB_CLK_SIZE                          2
-#define MACB_RTY_OFFSET                                12
-#define MACB_RTY_SIZE                          1
-#define MACB_PAE_OFFSET                                13
-#define MACB_PAE_SIZE                          1
-#define MACB_RM9200_RMII_OFFSET                        13 /* AT91RM9200 only */
-#define MACB_RM9200_RMII_SIZE                  1  /* AT91RM9200 only */
-#define MACB_RBOF_OFFSET                       14
-#define MACB_RBOF_SIZE                         2
-#define MACB_RLCE_OFFSET                       16
-#define MACB_RLCE_SIZE                         1
-#define MACB_DRFCS_OFFSET                      17
-#define MACB_DRFCS_SIZE                                1
-#define MACB_EFRHD_OFFSET                      18
-#define MACB_EFRHD_SIZE                                1
-#define MACB_IRXFCS_OFFSET                     19
-#define MACB_IRXFCS_SIZE                       1
+#define MACB_SPD_OFFSET                0 /* Speed */
+#define MACB_SPD_SIZE          1
+#define MACB_FD_OFFSET         1 /* Full duplex */
+#define MACB_FD_SIZE           1
+#define MACB_BIT_RATE_OFFSET   2 /* Discard non-VLAN frames */
+#define MACB_BIT_RATE_SIZE     1
+#define MACB_JFRAME_OFFSET     3 /* reserved */
+#define MACB_JFRAME_SIZE       1
+#define MACB_CAF_OFFSET                4 /* Copy all frames */
+#define MACB_CAF_SIZE          1
+#define MACB_NBC_OFFSET                5 /* No broadcast */
+#define MACB_NBC_SIZE          1
+#define MACB_NCFGR_MTI_OFFSET  6 /* Multicast hash enable */
+#define MACB_NCFGR_MTI_SIZE    1
+#define MACB_UNI_OFFSET                7 /* Unicast hash enable */
+#define MACB_UNI_SIZE          1
+#define MACB_BIG_OFFSET                8 /* Receive 1536 byte frames */
+#define MACB_BIG_SIZE          1
+#define MACB_EAE_OFFSET                9 /* External address match enable */
+#define MACB_EAE_SIZE          1
+#define MACB_CLK_OFFSET                10
+#define MACB_CLK_SIZE          2
+#define MACB_RTY_OFFSET                12 /* Retry test */
+#define MACB_RTY_SIZE          1
+#define MACB_PAE_OFFSET                13 /* Pause enable */
+#define MACB_PAE_SIZE          1
+#define MACB_RM9200_RMII_OFFSET        13 /* AT91RM9200 only */
+#define MACB_RM9200_RMII_SIZE  1  /* AT91RM9200 only */
+#define MACB_RBOF_OFFSET       14 /* Receive buffer offset */
+#define MACB_RBOF_SIZE         2
+#define MACB_RLCE_OFFSET       16 /* Length field error frame discard */
+#define MACB_RLCE_SIZE         1
+#define MACB_DRFCS_OFFSET      17 /* FCS remove */
+#define MACB_DRFCS_SIZE                1
+#define MACB_EFRHD_OFFSET      18
+#define MACB_EFRHD_SIZE                1
+#define MACB_IRXFCS_OFFSET     19
+#define MACB_IRXFCS_SIZE       1
 
 /* GEM specific NCFGR bitfields. */
-#define GEM_GBE_OFFSET                         10
-#define GEM_GBE_SIZE                           1
-#define GEM_CLK_OFFSET                         18
-#define GEM_CLK_SIZE                           3
-#define GEM_DBW_OFFSET                         21
-#define GEM_DBW_SIZE                           2
-#define GEM_RXCOEN_OFFSET                      24
-#define GEM_RXCOEN_SIZE                                1
+#define GEM_GBE_OFFSET         10 /* Gigabit mode enable */
+#define GEM_GBE_SIZE           1
+#define GEM_CLK_OFFSET         18 /* MDC clock division */
+#define GEM_CLK_SIZE           3
+#define GEM_DBW_OFFSET         21 /* Data bus width */
+#define GEM_DBW_SIZE           2
+#define GEM_RXCOEN_OFFSET      24
+#define GEM_RXCOEN_SIZE                1
 
 /* Constants for data bus width. */
-#define GEM_DBW32                              0
-#define GEM_DBW64                              1
-#define GEM_DBW128                             2
+#define GEM_DBW32              0 /* 32 bit AMBA AHB data bus width */
+#define GEM_DBW64              1 /* 64 bit AMBA AHB data bus width */
+#define GEM_DBW128             2 /* 128 bit AMBA AHB data bus width */
 
 /* Bitfields in DMACFG. */
-#define GEM_FBLDO_OFFSET                       0
-#define GEM_FBLDO_SIZE                         5
-#define GEM_ENDIA_OFFSET                       7
-#define GEM_ENDIA_SIZE                         1
-#define GEM_RXBMS_OFFSET                       8
-#define GEM_RXBMS_SIZE                         2
-#define GEM_TXPBMS_OFFSET                      10
-#define GEM_TXPBMS_SIZE                                1
-#define GEM_TXCOEN_OFFSET                      11
-#define GEM_TXCOEN_SIZE                                1
-#define GEM_RXBS_OFFSET                                16
-#define GEM_RXBS_SIZE                          8
-#define GEM_DDRP_OFFSET                                24
-#define GEM_DDRP_SIZE                          1
+#define GEM_FBLDO_OFFSET       0 /* fixed burst length for DMA */
+#define GEM_FBLDO_SIZE         5
+#define GEM_ENDIA_OFFSET       7 /* endian swap mode for packet data access */
+#define GEM_ENDIA_SIZE         1
+#define GEM_RXBMS_OFFSET       8 /* RX packet buffer memory size select */
+#define GEM_RXBMS_SIZE         2
+#define GEM_TXPBMS_OFFSET      10 /* TX packet buffer memory size select */
+#define GEM_TXPBMS_SIZE                1
+#define GEM_TXCOEN_OFFSET      11 /* TX IP/TCP/UDP checksum gen offload */
+#define GEM_TXCOEN_SIZE                1
+#define GEM_RXBS_OFFSET                16 /* DMA receive buffer size */
+#define GEM_RXBS_SIZE          8
+#define GEM_DDRP_OFFSET                24 /* disc_when_no_ahb */
+#define GEM_DDRP_SIZE          1
 
 
 /* Bitfields in NSR */
-#define MACB_NSR_LINK_OFFSET                   0
-#define MACB_NSR_LINK_SIZE                     1
-#define MACB_MDIO_OFFSET                       1
-#define MACB_MDIO_SIZE                         1
-#define MACB_IDLE_OFFSET                       2
-#define MACB_IDLE_SIZE                         1
+#define MACB_NSR_LINK_OFFSET   0 /* pcs_link_state */
+#define MACB_NSR_LINK_SIZE     1
+#define MACB_MDIO_OFFSET       1 /* status of the mdio_in pin */
+#define MACB_MDIO_SIZE         1
+#define MACB_IDLE_OFFSET       2 /* The PHY management logic is idle */
+#define MACB_IDLE_SIZE         1
 
 /* Bitfields in TSR */
-#define MACB_UBR_OFFSET                                0
-#define MACB_UBR_SIZE                          1
-#define MACB_COL_OFFSET                                1
-#define MACB_COL_SIZE                          1
-#define MACB_TSR_RLE_OFFSET                    2
-#define MACB_TSR_RLE_SIZE                      1
-#define MACB_TGO_OFFSET                                3
-#define MACB_TGO_SIZE                          1
-#define MACB_BEX_OFFSET                                4
-#define MACB_BEX_SIZE                          1
-#define MACB_RM9200_BNQ_OFFSET                 4 /* AT91RM9200 only */
-#define MACB_RM9200_BNQ_SIZE                   1 /* AT91RM9200 only */
-#define MACB_COMP_OFFSET                       5
-#define MACB_COMP_SIZE                         1
-#define MACB_UND_OFFSET                                6
-#define MACB_UND_SIZE                          1
+#define MACB_UBR_OFFSET                0 /* Used bit read */
+#define MACB_UBR_SIZE          1
+#define MACB_COL_OFFSET                1 /* Collision occurred */
+#define MACB_COL_SIZE          1
+#define MACB_TSR_RLE_OFFSET    2 /* Retry limit exceeded */
+#define MACB_TSR_RLE_SIZE      1
+#define MACB_TGO_OFFSET                3 /* Transmit go */
+#define MACB_TGO_SIZE          1
+#define MACB_BEX_OFFSET                4 /* TX frame corruption due to AHB error */
+#define MACB_BEX_SIZE          1
+#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
+#define MACB_RM9200_BNQ_SIZE   1 /* AT91RM9200 only */
+#define MACB_COMP_OFFSET       5 /* Trnasmit complete */
+#define MACB_COMP_SIZE         1
+#define MACB_UND_OFFSET                6 /* Trnasmit under run */
+#define MACB_UND_SIZE          1
 
 /* Bitfields in RSR */
-#define MACB_BNA_OFFSET                                0
-#define MACB_BNA_SIZE                          1
-#define MACB_REC_OFFSET                                1
-#define MACB_REC_SIZE                          1
-#define MACB_OVR_OFFSET                                2
-#define MACB_OVR_SIZE                          1
+#define MACB_BNA_OFFSET                0 /* Buffer not available */
+#define MACB_BNA_SIZE          1
+#define MACB_REC_OFFSET                1 /* Frame received */
+#define MACB_REC_SIZE          1
+#define MACB_OVR_OFFSET                2 /* Receive overrun */
+#define MACB_OVR_SIZE          1
 
 /* Bitfields in ISR/IER/IDR/IMR */
-#define MACB_MFD_OFFSET                                0
-#define MACB_MFD_SIZE                          1
-#define MACB_RCOMP_OFFSET                      1
-#define MACB_RCOMP_SIZE                                1
-#define MACB_RXUBR_OFFSET                      2
-#define MACB_RXUBR_SIZE                                1
-#define MACB_TXUBR_OFFSET                      3
-#define MACB_TXUBR_SIZE                                1
-#define MACB_ISR_TUND_OFFSET                   4
-#define MACB_ISR_TUND_SIZE                     1
-#define MACB_ISR_RLE_OFFSET                    5
-#define MACB_ISR_RLE_SIZE                      1
-#define MACB_TXERR_OFFSET                      6
-#define MACB_TXERR_SIZE                                1
-#define MACB_TCOMP_OFFSET                      7
-#define MACB_TCOMP_SIZE                                1
-#define MACB_ISR_LINK_OFFSET                   9
-#define MACB_ISR_LINK_SIZE                     1
-#define MACB_ISR_ROVR_OFFSET                   10
-#define MACB_ISR_ROVR_SIZE                     1
-#define MACB_HRESP_OFFSET                      11
-#define MACB_HRESP_SIZE                                1
-#define MACB_PFR_OFFSET                                12
-#define MACB_PFR_SIZE                          1
-#define MACB_PTZ_OFFSET                                13
-#define MACB_PTZ_SIZE                          1
+#define MACB_MFD_OFFSET                0 /* Management frame sent */
+#define MACB_MFD_SIZE          1
+#define MACB_RCOMP_OFFSET      1 /* Receive complete */
+#define MACB_RCOMP_SIZE                1
+#define MACB_RXUBR_OFFSET      2 /* RX used bit read */
+#define MACB_RXUBR_SIZE                1
+#define MACB_TXUBR_OFFSET      3 /* TX used bit read */
+#define MACB_TXUBR_SIZE                1
+#define MACB_ISR_TUND_OFFSET   4 /* Enable TX buffer under run interrupt */
+#define MACB_ISR_TUND_SIZE     1
+#define MACB_ISR_RLE_OFFSET    5 /* EN retry exceeded/late coll interrupt */
+#define MACB_ISR_RLE_SIZE      1
+#define MACB_TXERR_OFFSET      6 /* EN TX frame corrupt from error interrupt */
+#define MACB_TXERR_SIZE                1
+#define MACB_TCOMP_OFFSET      7 /* Enable transmit complete interrupt */
+#define MACB_TCOMP_SIZE                1
+#define MACB_ISR_LINK_OFFSET   9 /* Enable link change interrupt */
+#define MACB_ISR_LINK_SIZE     1
+#define MACB_ISR_ROVR_OFFSET   10 /* Enable receive overrun interrupt */
+#define MACB_ISR_ROVR_SIZE     1
+#define MACB_HRESP_OFFSET      11 /* Enable hrsep not OK interrupt */
+#define MACB_HRESP_SIZE                1
+#define MACB_PFR_OFFSET                12 /* Enable pause frame w/ quantum interrupt */
+#define MACB_PFR_SIZE          1
+#define MACB_PTZ_OFFSET                13 /* Enable pause time zero interrupt */
+#define MACB_PTZ_SIZE          1
 
 /* Bitfields in MAN */
-#define MACB_DATA_OFFSET                       0
-#define MACB_DATA_SIZE                         16
-#define MACB_CODE_OFFSET                       16
-#define MACB_CODE_SIZE                         2
-#define MACB_REGA_OFFSET                       18
-#define MACB_REGA_SIZE                         5
-#define MACB_PHYA_OFFSET                       23
-#define MACB_PHYA_SIZE                         5
-#define MACB_RW_OFFSET                         28
-#define MACB_RW_SIZE                           2
-#define MACB_SOF_OFFSET                                30
-#define MACB_SOF_SIZE                          2
+#define MACB_DATA_OFFSET       0 /* data */
+#define MACB_DATA_SIZE         16
+#define MACB_CODE_OFFSET       16 /* Must be written to 10 */
+#define MACB_CODE_SIZE         2
+#define MACB_REGA_OFFSET       18 /* Register address */
+#define MACB_REGA_SIZE         5
+#define MACB_PHYA_OFFSET       23 /* PHY address */
+#define MACB_PHYA_SIZE         5
+#define MACB_RW_OFFSET         28 /* Operation. 10 is read. 01 is write. */
+#define MACB_RW_SIZE           2
+#define MACB_SOF_OFFSET                30 /* Must be written to 1 for Clause 22 */
+#define MACB_SOF_SIZE          2
 
 /* Bitfields in USRIO (AVR32) */
 #define MACB_MII_OFFSET                                0
 /* Bitfields in USRIO (AT91) */
 #define MACB_RMII_OFFSET                       0
 #define MACB_RMII_SIZE                         1
-#define GEM_RGMII_OFFSET                       0       /* GEM gigabit mode */
+#define GEM_RGMII_OFFSET                       0 /* GEM gigabit mode */
 #define GEM_RGMII_SIZE                         1
 #define MACB_CLKEN_OFFSET                      1
 #define MACB_CLKEN_SIZE                                1
 #define queue_writel(queue, reg, value)                        \
        __raw_writel((value), (queue)->bp->regs + (queue)->reg)
 
-/*
- * Conditional GEM/MACB macros.  These perform the operation to the correct
+/* Conditional GEM/MACB macros.  These perform the operation to the correct
  * register dependent on whether the device is a GEM or a MACB.  For registers
  * and bitfields that are common across both devices, use macb_{read,write}l
  * to avoid the cost of the conditional.
                __v; \
        })
 
-/**
- * struct macb_dma_desc - Hardware DMA descriptor
+/* struct macb_dma_desc - Hardware DMA descriptor
  * @addr: DMA address of data buffer
  * @ctrl: Control and status bits
  */
@@ -503,8 +547,7 @@ struct macb_dma_desc {
 /* limit RX checksum offload to TCP and UDP packets */
 #define GEM_RX_CSUM_CHECKED_MASK               2
 
-/**
- * struct macb_tx_skb - data about an skb which is being transmitted
+/* struct macb_tx_skb - data about an skb which is being transmitted
  * @skb: skb currently being transmitted, only set for the last buffer
  *       of the frame
  * @mapping: DMA address of the skb's fragment buffer
@@ -519,8 +562,7 @@ struct macb_tx_skb {
        bool                    mapped_as_page;
 };
 
-/*
- * Hardware-collected statistics. Used when updating the network
+/* Hardware-collected statistics. Used when updating the network
  * device stats by a periodic timer.
  */
 struct macb_stats {
@@ -595,6 +637,107 @@ struct gem_stats {
        u32     rx_udp_checksum_errors;
 };
 
+/* Describes the name and offset of an individual statistic register, as
+ * returned by `ethtool -S`. Also describes which net_device_stats statistics
+ * this register should contribute to.
+ */
+struct gem_statistic {
+       char stat_string[ETH_GSTRING_LEN];
+       int offset;
+       u32 stat_bits;
+};
+
+/* Bitfield defs for net_device_stat statistics */
+#define GEM_NDS_RXERR_OFFSET           0
+#define GEM_NDS_RXLENERR_OFFSET                1
+#define GEM_NDS_RXOVERERR_OFFSET       2
+#define GEM_NDS_RXCRCERR_OFFSET                3
+#define GEM_NDS_RXFRAMEERR_OFFSET      4
+#define GEM_NDS_RXFIFOERR_OFFSET       5
+#define GEM_NDS_TXERR_OFFSET           6
+#define GEM_NDS_TXABORTEDERR_OFFSET    7
+#define GEM_NDS_TXCARRIERERR_OFFSET    8
+#define GEM_NDS_TXFIFOERR_OFFSET       9
+#define GEM_NDS_COLLISIONS_OFFSET      10
+
+#define GEM_STAT_TITLE(name, title) GEM_STAT_TITLE_BITS(name, title, 0)
+#define GEM_STAT_TITLE_BITS(name, title, bits) {       \
+       .stat_string = title,                           \
+       .offset = GEM_##name,                           \
+       .stat_bits = bits                               \
+}
+
+/* list of gem statistic registers. The names MUST match the
+ * corresponding GEM_* definitions.
+ */
+static const struct gem_statistic gem_statistics[] = {
+       GEM_STAT_TITLE(OCTTXL, "tx_octets"), /* OCTTXH combined with OCTTXL */
+       GEM_STAT_TITLE(TXCNT, "tx_frames"),
+       GEM_STAT_TITLE(TXBCCNT, "tx_broadcast_frames"),
+       GEM_STAT_TITLE(TXMCCNT, "tx_multicast_frames"),
+       GEM_STAT_TITLE(TXPAUSECNT, "tx_pause_frames"),
+       GEM_STAT_TITLE(TX64CNT, "tx_64_byte_frames"),
+       GEM_STAT_TITLE(TX65CNT, "tx_65_127_byte_frames"),
+       GEM_STAT_TITLE(TX128CNT, "tx_128_255_byte_frames"),
+       GEM_STAT_TITLE(TX256CNT, "tx_256_511_byte_frames"),
+       GEM_STAT_TITLE(TX512CNT, "tx_512_1023_byte_frames"),
+       GEM_STAT_TITLE(TX1024CNT, "tx_1024_1518_byte_frames"),
+       GEM_STAT_TITLE(TX1519CNT, "tx_greater_than_1518_byte_frames"),
+       GEM_STAT_TITLE_BITS(TXURUNCNT, "tx_underrun",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_TXFIFOERR)),
+       GEM_STAT_TITLE_BITS(SNGLCOLLCNT, "tx_single_collision_frames",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE_BITS(MULTICOLLCNT, "tx_multiple_collision_frames",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE_BITS(EXCESSCOLLCNT, "tx_excessive_collisions",
+                           GEM_BIT(NDS_TXERR)|
+                           GEM_BIT(NDS_TXABORTEDERR)|
+                           GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE_BITS(LATECOLLCNT, "tx_late_collisions",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE(TXDEFERCNT, "tx_deferred_frames"),
+       GEM_STAT_TITLE_BITS(TXCSENSECNT, "tx_carrier_sense_errors",
+                           GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
+       GEM_STAT_TITLE(OCTRXL, "rx_octets"), /* OCTRXH combined with OCTRXL */
+       GEM_STAT_TITLE(RXCNT, "rx_frames"),
+       GEM_STAT_TITLE(RXBROADCNT, "rx_broadcast_frames"),
+       GEM_STAT_TITLE(RXMULTICNT, "rx_multicast_frames"),
+       GEM_STAT_TITLE(RXPAUSECNT, "rx_pause_frames"),
+       GEM_STAT_TITLE(RX64CNT, "rx_64_byte_frames"),
+       GEM_STAT_TITLE(RX65CNT, "rx_65_127_byte_frames"),
+       GEM_STAT_TITLE(RX128CNT, "rx_128_255_byte_frames"),
+       GEM_STAT_TITLE(RX256CNT, "rx_256_511_byte_frames"),
+       GEM_STAT_TITLE(RX512CNT, "rx_512_1023_byte_frames"),
+       GEM_STAT_TITLE(RX1024CNT, "rx_1024_1518_byte_frames"),
+       GEM_STAT_TITLE(RX1519CNT, "rx_greater_than_1518_byte_frames"),
+       GEM_STAT_TITLE_BITS(RXUNDRCNT, "rx_undersized_frames",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+       GEM_STAT_TITLE_BITS(RXOVRCNT, "rx_oversize_frames",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+       GEM_STAT_TITLE_BITS(RXJABCNT, "rx_jabbers",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
+       GEM_STAT_TITLE_BITS(RXFCSCNT, "rx_frame_check_sequence_errors",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXCRCERR)),
+       GEM_STAT_TITLE_BITS(RXLENGTHCNT, "rx_length_field_frame_errors",
+                           GEM_BIT(NDS_RXERR)),
+       GEM_STAT_TITLE_BITS(RXSYMBCNT, "rx_symbol_errors",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFRAMEERR)),
+       GEM_STAT_TITLE_BITS(RXALIGNCNT, "rx_alignment_errors",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
+       GEM_STAT_TITLE_BITS(RXRESERRCNT, "rx_resource_errors",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
+       GEM_STAT_TITLE_BITS(RXORCNT, "rx_overruns",
+                           GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFIFOERR)),
+       GEM_STAT_TITLE_BITS(RXIPCCNT, "rx_ip_header_checksum_errors",
+                           GEM_BIT(NDS_RXERR)),
+       GEM_STAT_TITLE_BITS(RXTCPCCNT, "rx_tcp_checksum_errors",
+                           GEM_BIT(NDS_RXERR)),
+       GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
+                           GEM_BIT(NDS_RXERR)),
+};
+
+#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
+
 struct macb;
 
 struct macb_or_gem_ops {
@@ -673,6 +816,8 @@ struct macb {
        dma_addr_t skb_physaddr;                /* phys addr from pci_map_single */
        int skb_length;                         /* saved skb length for pci_unmap_single */
        unsigned int            max_tx_length;
+
+       u64                     ethtool_stats[GEM_STATS_LEN];
 };
 
 extern const struct ethtool_ops macb_ethtool_ops;
index babe2a915b0050936edcecf86675d43610794a70..526ea74e82d9590b248530148a4a0776411d608d 100644 (file)
@@ -1860,9 +1860,9 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        cpl->iff = dev->if_port;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                cpl->vlan_valid = 1;
-               cpl->vlan = htons(vlan_tx_tag_get(skb));
+               cpl->vlan = htons(skb_vlan_tag_get(skb));
                st->vlan_insert++;
        } else
                cpl->vlan_valid = 0;
index 3dfcf600fcc68bee74b41a51942c283f7bf191aa..d6aa602f168d776cd1450fb10c8fdeb6c0118ae3 100644 (file)
@@ -1148,8 +1148,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
        cpl->len = htonl(skb->len);
        cntrl = V_TXPKT_INTF(pi->port_id);
 
-       if (vlan_tx_tag_present(skb))
-               cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
+       if (skb_vlan_tag_present(skb))
+               cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
 
        tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
        if (tso_info) {
@@ -1282,7 +1282,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                qs->port_stats[SGE_PSTAT_TX_CSUM]++;
        if (skb_shinfo(skb)->gso_size)
                qs->port_stats[SGE_PSTAT_TSO]++;
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                qs->port_stats[SGE_PSTAT_VLANINS]++;
 
        /*
index c74a898fcd4f7c41bd338ba08fa8e73b078ff5db..184a8d545ac4230e07788fbb831be2dcdfa57f83 100644 (file)
@@ -727,9 +727,9 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
                p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
        }
 
-       for (i = 0; i < 6; i++)
-               p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
-                                hex_to_bin(vpd.na_data[2 * i + 1]);
+       ret = hex2bin(p->eth_base, vpd.na_data, 6);
+       if (ret < 0)
+               return -EINVAL;
        return 0;
 }
 
index b8528077599730ec48853fc6ab6495a72e04f826..ae50cd72358cb4bda0c78784f6d0cb2586bac7c2 100644 (file)
@@ -4,6 +4,6 @@
 
 obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
new file mode 100644 (file)
index 0000000..9062a84
--- /dev/null
@@ -0,0 +1,317 @@
+/*
+ *  This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *  Copyright (C) 2003-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Deepak (deepak.s@chelsio.com)
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ *  release for licensing terms and conditions.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/jhash.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+#include "cxgb4.h"
+#include "clip_tbl.h"
+
+static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
+{
+       unsigned int clipt_size_half = c->clipt_size / 2;
+
+       return jhash_1word(*key, 0) % clipt_size_half;
+}
+
+static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
+{
+       unsigned int clipt_size_half = d->clipt_size / 2;
+       u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+       return clipt_size_half +
+               (jhash_1word(xor, 0) % clipt_size_half);
+}
+
+static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
+                                  int addr_len)
+{
+       return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
+                               ipv6_clip_hash(ctbl, addr);
+}
+
+static int clip6_get_mbox(const struct net_device *dev,
+                         const struct in6_addr *lip)
+{
+       struct adapter *adap = netdev2adap(dev);
+       struct fw_clip_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+                             FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+       c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
+       *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+       *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+       return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+static int clip6_release_mbox(const struct net_device *dev,
+                             const struct in6_addr *lip)
+{
+       struct adapter *adap = netdev2adap(dev);
+       struct fw_clip_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+                             FW_CMD_REQUEST_F | FW_CMD_READ_F);
+       c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
+       *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+       *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+       return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
+{
+       struct adapter *adap = netdev2adap(dev);
+       struct clip_tbl *ctbl = adap->clipt;
+       struct clip_entry *ce, *cte;
+       u32 *addr = (u32 *)lip;
+       int hash;
+       int addr_len;
+       int ret = 0;
+
+       if (!ctbl)
+               return 0;
+
+       if (v6)
+               addr_len = 16;
+       else
+               addr_len = 4;
+
+       hash = clip_addr_hash(ctbl, addr, addr_len);
+
+       read_lock_bh(&ctbl->lock);
+       list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
+               if (addr_len == cte->addr_len &&
+                   memcmp(lip, cte->addr, cte->addr_len) == 0) {
+                       ce = cte;
+                       read_unlock_bh(&ctbl->lock);
+                       goto found;
+               }
+       }
+       read_unlock_bh(&ctbl->lock);
+
+       write_lock_bh(&ctbl->lock);
+       if (!list_empty(&ctbl->ce_free_head)) {
+               ce = list_first_entry(&ctbl->ce_free_head,
+                                     struct clip_entry, list);
+               list_del(&ce->list);
+               INIT_LIST_HEAD(&ce->list);
+               spin_lock_init(&ce->lock);
+               atomic_set(&ce->refcnt, 0);
+               atomic_dec(&ctbl->nfree);
+               ce->addr_len = addr_len;
+               memcpy(ce->addr, lip, addr_len);
+               list_add_tail(&ce->list, &ctbl->hash_list[hash]);
+               if (v6) {
+                       ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
+                       if (ret) {
+                               write_unlock_bh(&ctbl->lock);
+                               return ret;
+                       }
+               }
+       } else {
+               write_unlock_bh(&ctbl->lock);
+               return -ENOMEM;
+       }
+       write_unlock_bh(&ctbl->lock);
+found:
+       atomic_inc(&ce->refcnt);
+
+       return 0;
+}
+EXPORT_SYMBOL(cxgb4_clip_get);
+
+void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
+{
+       struct adapter *adap = netdev2adap(dev);
+       struct clip_tbl *ctbl = adap->clipt;
+       struct clip_entry *ce, *cte;
+       u32 *addr = (u32 *)lip;
+       int hash;
+       int addr_len;
+
+       if (v6)
+               addr_len = 16;
+       else
+               addr_len = 4;
+
+       hash = clip_addr_hash(ctbl, addr, addr_len);
+
+       read_lock_bh(&ctbl->lock);
+       list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
+               if (addr_len == cte->addr_len &&
+                   memcmp(lip, cte->addr, cte->addr_len) == 0) {
+                       ce = cte;
+                       read_unlock_bh(&ctbl->lock);
+                       goto found;
+               }
+       }
+       read_unlock_bh(&ctbl->lock);
+
+       return;
+found:
+       write_lock_bh(&ctbl->lock);
+       spin_lock_bh(&ce->lock);
+       if (atomic_dec_and_test(&ce->refcnt)) {
+               list_del(&ce->list);
+               INIT_LIST_HEAD(&ce->list);
+               list_add_tail(&ce->list, &ctbl->ce_free_head);
+               atomic_inc(&ctbl->nfree);
+               if (v6)
+                       clip6_release_mbox(dev, (const struct in6_addr *)lip);
+       }
+       spin_unlock_bh(&ce->lock);
+       write_unlock_bh(&ctbl->lock);
+}
+EXPORT_SYMBOL(cxgb4_clip_release);
+
+/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actul CLIP command.
+ */
+static int cxgb4_update_dev_clip(struct net_device *root_dev,
+                                struct net_device *dev)
+{
+       struct inet6_dev *idev = NULL;
+       struct inet6_ifaddr *ifa;
+       int ret = 0;
+
+       idev = __in6_dev_get(root_dev);
+       if (!idev)
+               return ret;
+
+       read_lock_bh(&idev->lock);
+       list_for_each_entry(ifa, &idev->addr_list, if_list) {
+               ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
+               if (ret < 0)
+                       break;
+       }
+       read_unlock_bh(&idev->lock);
+
+       return ret;
+}
+
+int cxgb4_update_root_dev_clip(struct net_device *dev)
+{
+       struct net_device *root_dev = NULL;
+       int i, ret = 0;
+
+       /* First populate the real net device's IPv6 addresses */
+       ret = cxgb4_update_dev_clip(dev, dev);
+       if (ret)
+               return ret;
+
+       /* Parse all bond and vlan devices layered on top of the physical dev */
+       root_dev = netdev_master_upper_dev_get_rcu(dev);
+       if (root_dev) {
+               ret = cxgb4_update_dev_clip(root_dev, dev);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < VLAN_N_VID; i++) {
+               root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
+               if (!root_dev)
+                       continue;
+
+               ret = cxgb4_update_dev_clip(root_dev, dev);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(cxgb4_update_root_dev_clip);
+
+int clip_tbl_show(struct seq_file *seq, void *v)
+{
+       struct adapter *adapter = seq->private;
+       struct clip_tbl *ctbl = adapter->clipt;
+       struct clip_entry *ce;
+       char ip[60];
+       int i;
+
+       read_lock_bh(&ctbl->lock);
+
+       seq_puts(seq, "IP Address                  Users\n");
+       for (i = 0 ; i < ctbl->clipt_size;  ++i) {
+               list_for_each_entry(ce, &ctbl->hash_list[i], list) {
+                       ip[0] = '\0';
+                       if (ce->addr_len == 16)
+                               sprintf(ip, "%pI6c", ce->addr);
+                       else
+                               sprintf(ip, "%pI4c", ce->addr);
+                       seq_printf(seq, "%-25s   %u\n", ip,
+                                  atomic_read(&ce->refcnt));
+               }
+       }
+       seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));
+
+       read_unlock_bh(&ctbl->lock);
+
+       return 0;
+}
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+                                 unsigned int clipt_end)
+{
+       struct clip_entry *cl_list;
+       struct clip_tbl *ctbl;
+       unsigned int clipt_size;
+       int i;
+
+       if (clipt_start >= clipt_end)
+               return NULL;
+       clipt_size = clipt_end - clipt_start + 1;
+       if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
+               return NULL;
+
+       ctbl = t4_alloc_mem(sizeof(*ctbl) +
+                           clipt_size*sizeof(struct list_head));
+       if (!ctbl)
+               return NULL;
+
+       ctbl->clipt_start = clipt_start;
+       ctbl->clipt_size = clipt_size;
+       INIT_LIST_HEAD(&ctbl->ce_free_head);
+
+       atomic_set(&ctbl->nfree, clipt_size);
+       rwlock_init(&ctbl->lock);
+
+       for (i = 0; i < ctbl->clipt_size; ++i)
+               INIT_LIST_HEAD(&ctbl->hash_list[i]);
+
+       cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
+       ctbl->cl_list = (void *)cl_list;
+
+       for (i = 0; i < clipt_size; i++) {
+               INIT_LIST_HEAD(&cl_list[i].list);
+               list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
+       }
+
+       return ctbl;
+}
+
+void t4_cleanup_clip_tbl(struct adapter *adap)
+{
+       struct clip_tbl *ctbl = adap->clipt;
+
+       if (ctbl) {
+               if (ctbl->cl_list)
+                       t4_free_mem(ctbl->cl_list);
+               t4_free_mem(ctbl);
+       }
+}
+EXPORT_SYMBOL(t4_cleanup_clip_tbl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
new file mode 100644 (file)
index 0000000..2eaba01
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ *  This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *  Copyright (C) 2003-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Deepak (deepak.s@chelsio.com)
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ *  release for licensing terms and conditions.
+ */
+
+struct clip_entry {
+       spinlock_t lock;        /* Hold while modifying clip reference */
+       atomic_t refcnt;
+       struct list_head list;
+       u32 addr[4];
+       int addr_len;
+};
+
+struct clip_tbl {
+       unsigned int clipt_start;
+       unsigned int clipt_size;
+       rwlock_t lock;
+       atomic_t nfree;
+       struct list_head ce_free_head;
+       void *cl_list;
+       struct list_head hash_list[0];
+};
+
+enum {
+       CLIPT_MIN_HASH_BUCKETS = 2,
+};
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+                                 unsigned int clipt_end);
+int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6);
+void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6);
+int clip_tbl_show(struct seq_file *seq, void *v);
+int cxgb4_update_root_dev_clip(struct net_device *dev);
+void t4_cleanup_clip_tbl(struct adapter *adap);
index 5ab5c3133acd3136df281f22487ae7ea5a5d86bf..0fe3a52fb0b8f5daf3315d1e1e4b498260670bd4 100644 (file)
 #include <asm/io.h>
 #include "cxgb4_uld.h"
 
-#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0C
-#define T4FW_VERSION_MICRO 0x19
-#define T4FW_VERSION_BUILD 0x00
-
-#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0C
-#define T5FW_VERSION_MICRO 0x19
-#define T5FW_VERSION_BUILD 0x00
-
 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
 
 enum {
@@ -231,6 +221,7 @@ struct sge_params {
 struct tp_params {
        unsigned int ntxchan;        /* # of Tx channels */
        unsigned int tre;            /* log2 of core clocks per TP tick */
+       unsigned int la_mask;        /* what events are recorded by TP LA */
        unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
                                     /* channel map */
 
@@ -290,11 +281,21 @@ enum chip_type {
        T5_LAST_REV     = T5_A1,
 };
 
+struct devlog_params {
+       u32 memtype;                    /* which memory (EDC0, EDC1, MC) */
+       u32 start;                      /* start of log in firmware memory */
+       u32 size;                       /* size of log */
+};
+
 struct adapter_params {
        struct sge_params sge;
        struct tp_params  tp;
        struct vpd_params vpd;
        struct pci_params pci;
+       struct devlog_params devlog;
+       enum pcie_memwin drv_memwin;
+
+       unsigned int cim_la_size;
 
        unsigned int sf_size;             /* serial flash size in bytes */
        unsigned int sf_nsec;             /* # of flash sectors */
@@ -476,6 +477,22 @@ struct sge_rspq {                   /* state for an SGE response queue */
        struct adapter *adap;
        struct net_device *netdev;  /* associated net device */
        rspq_handler_t handler;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define CXGB_POLL_STATE_IDLE           0
+#define CXGB_POLL_STATE_NAPI           BIT(0) /* NAPI owns this poll */
+#define CXGB_POLL_STATE_POLL           BIT(1) /* poll owns this poll */
+#define CXGB_POLL_STATE_NAPI_YIELD     BIT(2) /* NAPI yielded this poll */
+#define CXGB_POLL_STATE_POLL_YIELD     BIT(3) /* poll yielded this poll */
+#define CXGB_POLL_YIELD                        (CXGB_POLL_STATE_NAPI_YIELD |   \
+                                        CXGB_POLL_STATE_POLL_YIELD)
+#define CXGB_POLL_LOCKED               (CXGB_POLL_STATE_NAPI |         \
+                                        CXGB_POLL_STATE_POLL)
+#define CXGB_POLL_USER_PEND            (CXGB_POLL_STATE_POLL |         \
+                                        CXGB_POLL_STATE_POLL_YIELD)
+       unsigned int bpoll_state;
+       spinlock_t bpoll_lock;          /* lock for busy poll */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 };
 
 struct sge_eth_stats {              /* Ethernet queue statistics */
@@ -658,6 +675,9 @@ struct adapter {
        unsigned int l2t_start;
        unsigned int l2t_end;
        struct l2t_data *l2t;
+       unsigned int clipt_start;
+       unsigned int clipt_end;
+       struct clip_tbl *clipt;
        void *uld_handle[CXGB4_ULD_MAX];
        struct list_head list_node;
        struct list_head rcu_node;
@@ -877,6 +897,102 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
        return netdev2pinfo(dev)->adapter;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
+{
+       spin_lock_init(&q->bpoll_lock);
+       q->bpoll_state = CXGB_POLL_STATE_IDLE;
+}
+
+static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
+{
+       bool rc = true;
+
+       spin_lock(&q->bpoll_lock);
+       if (q->bpoll_state & CXGB_POLL_LOCKED) {
+               q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
+               rc = false;
+       } else {
+               q->bpoll_state = CXGB_POLL_STATE_NAPI;
+       }
+       spin_unlock(&q->bpoll_lock);
+       return rc;
+}
+
+static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
+{
+       bool rc = false;
+
+       spin_lock(&q->bpoll_lock);
+       if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
+               rc = true;
+       q->bpoll_state = CXGB_POLL_STATE_IDLE;
+       spin_unlock(&q->bpoll_lock);
+       return rc;
+}
+
+static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
+{
+       bool rc = true;
+
+       spin_lock_bh(&q->bpoll_lock);
+       if (q->bpoll_state & CXGB_POLL_LOCKED) {
+               q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
+               rc = false;
+       } else {
+               q->bpoll_state |= CXGB_POLL_STATE_POLL;
+       }
+       spin_unlock_bh(&q->bpoll_lock);
+       return rc;
+}
+
+static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
+{
+       bool rc = false;
+
+       spin_lock_bh(&q->bpoll_lock);
+       if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
+               rc = true;
+       q->bpoll_state = CXGB_POLL_STATE_IDLE;
+       spin_unlock_bh(&q->bpoll_lock);
+       return rc;
+}
+
+static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
+{
+       return q->bpoll_state & CXGB_POLL_USER_PEND;
+}
+#else
+static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
+{
+}
+
+static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
+{
+       return true;
+}
+
+static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
+{
+       return false;
+}
+
+static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
+{
+       return false;
+}
+
+static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
+{
+       return false;
+}
+
+static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
+{
+       return false;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 void t4_os_portmod_changed(const struct adapter *adap, int port_id);
 void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
 
@@ -905,6 +1021,7 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
 int t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
+int cxgb_busy_poll(struct napi_struct *napi);
 extern int dbfifo_int_thresh;
 
 #define for_each_port(adapter, iter) \
@@ -995,7 +1112,10 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
 
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
 int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+                 unsigned int nwords, u32 *data, int byte_oriented);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
                  const u8 *fw_data, unsigned int size, int force);
 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
@@ -1013,6 +1133,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
                      u64 *pbar2_qoffset,
                      unsigned int *pbar2_qid);
 
+unsigned int qtimer_val(const struct adapter *adap,
+                       const struct sge_rspq *q);
 int t4_init_sge_params(struct adapter *adapter);
 int t4_init_tp_params(struct adapter *adap);
 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
@@ -1022,20 +1144,46 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                        int start, int n, const u16 *rspq, unsigned int nrspq);
 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
                       unsigned int flags);
+int t4_read_rss(struct adapter *adapter, u16 *entries);
+void t4_read_rss_key(struct adapter *adapter, u32 *key);
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
+                          u32 *valp);
+void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
+                          u32 *vfl, u32 *vfh);
+u32 t4_read_rss_pf_map(struct adapter *adapter);
+u32 t4_read_rss_pf_mask(struct adapter *adapter);
+
 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
               u64 *parity);
 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
                u64 *parity);
+void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
+                   size_t n);
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
+                   size_t n);
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+               unsigned int *valp);
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+                const unsigned int *valp);
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
 const char *t4_get_port_type_description(enum fw_port_type port_type);
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
                            unsigned int mask, unsigned int val);
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6);
 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
                  const unsigned short *alpha, const unsigned short *beta);
 
+void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
+
 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
 
 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
index a35d1ec6950e48bcf6003e9f9ccf64b9f2d8a431..6074680bc9858308fa68887140856c8bb05b4d6a 100644 (file)
@@ -22,7 +22,7 @@
 
 /* DCBx version control
  */
-char *dcb_ver_array[] = {
+static const char * const dcb_ver_array[] = {
        "Unknown",
        "DCBx-CIN",
        "DCBx-CEE 1.01",
@@ -428,7 +428,10 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
        }
        *pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;
 
-       INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+       if (local)
+               INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       else
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
        pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
        err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
        if (err != FW_PORT_DCB_CFG_SUCCESS) {
@@ -900,6 +903,88 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev,
                (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
 }
 
+static int cxgb4_ieee_read_ets(struct net_device *dev, struct ieee_ets *ets,
+                              int local)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+       struct adapter *adap = pi->adapter;
+       uint32_t tc_info;
+       struct fw_port_cmd pcmd;
+       int i, bwg, err;
+
+       if (!(dcb->msgs & (CXGB4_DCB_FW_PGID | CXGB4_DCB_FW_PGRATE)))
+               return 0;
+
+       ets->ets_cap =  dcb->pg_num_tcs_supported;
+
+       if (local) {
+               ets->willing = 1;
+               INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       } else {
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+       }
+
+       pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+               return err;
+       }
+
+       tc_info = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
+
+       if (local)
+               INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       else
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+               return err;
+       }
+
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               bwg = (tc_info >> ((7 - i) * 4)) & 0xF;
+               ets->prio_tc[i] = bwg;
+               ets->tc_tx_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
+               ets->tc_rx_bw[i] = ets->tc_tx_bw[i];
+               ets->tc_tsa[i] = pcmd.u.dcb.pgrate.tsa[i];
+       }
+
+       return 0;
+}
+
+static int cxgb4_ieee_get_ets(struct net_device *dev, struct ieee_ets *ets)
+{
+       return cxgb4_ieee_read_ets(dev, ets, 1);
+}
+
+/* We reuse this for peer PFC as well, as we can't have it enabled one way */
+static int cxgb4_ieee_get_pfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+
+       memset(pfc, 0, sizeof(struct ieee_pfc));
+
+       if (!(dcb->msgs & CXGB4_DCB_FW_PFC))
+               return 0;
+
+       pfc->pfc_cap = dcb->pfc_num_tcs_supported;
+       pfc->pfc_en = bitswap_1(dcb->pfcen);
+
+       return 0;
+}
+
+static int cxgb4_ieee_peer_ets(struct net_device *dev, struct ieee_ets *ets)
+{
+       return cxgb4_ieee_read_ets(dev, ets, 0);
+}
+
 /* Fill in the Application User Priority Map associated with the
  * specified Application.
  * Priority for IEEE dcb_app is an integer, with 0 being a valid value
@@ -1106,14 +1191,23 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
        struct port_info *pi = netdev2pinfo(dev);
 
        cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported));
-       pfc->pfc_en = pi->dcb.pfcen;
+
+       /* Firmware sends this to us in a formwat that is a bit flipped version
+        * of spec, correct it before we send it to host. This is taken care of
+        * by bit shifting in other uses of pfcen
+        */
+       pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
 
        return 0;
 }
 
 const struct dcbnl_rtnl_ops cxgb4_dcb_ops = {
+       .ieee_getets            = cxgb4_ieee_get_ets,
+       .ieee_getpfc            = cxgb4_ieee_get_pfc,
        .ieee_getapp            = cxgb4_ieee_getapp,
        .ieee_setapp            = cxgb4_ieee_setapp,
+       .ieee_peer_getets       = cxgb4_ieee_peer_ets,
+       .ieee_peer_getpfc       = cxgb4_ieee_get_pfc,
 
        /* CEE std */
        .getstate               = cxgb4_getstate,
index 31ce425616c9cdb112f417cb5c19c89c77a1e14a..ccf24d3dc982478b61e79ee2e850183b58701f45 100644 (file)
@@ -136,6 +136,17 @@ void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
 void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
 extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
 
+static inline __u8 bitswap_1(unsigned char val)
+{
+       return ((val & 0x80) >> 7) |
+              ((val & 0x40) >> 5) |
+              ((val & 0x20) >> 3) |
+              ((val & 0x10) >> 1) |
+              ((val & 0x08) << 1) |
+              ((val & 0x04) << 3) |
+              ((val & 0x02) << 5) |
+              ((val & 0x01) << 7);
+}
 #define CXGB4_DCB_ENABLED true
 
 #else /* !CONFIG_CHELSIO_T4_DCB */
index c98a350d857e27ac3f3216765e6ec797f689d908..d221f6b28fcd2d32dd19f7e539091f793ca40444 100644 (file)
 #include <linux/debugfs.h>
 #include <linux/string_helpers.h>
 #include <linux/sort.h>
+#include <linux/ctype.h>
 
 #include "cxgb4.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4fw_api.h"
 #include "cxgb4_debugfs.h"
+#include "clip_tbl.h"
 #include "l2t.h"
 
+/* generic seq_file support for showing a table of size rows x width. */
+static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
+{
+       pos -= tb->skip_first;
+       return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
+}
+
+static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
+{
+       struct seq_tab *tb = seq->private;
+
+       if (tb->skip_first && *pos == 0)
+               return SEQ_START_TOKEN;
+
+       return seq_tab_get_idx(tb, *pos);
+}
+
+static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       v = seq_tab_get_idx(seq->private, *pos + 1);
+       if (v)
+               ++*pos;
+       return v;
+}
+
+static void seq_tab_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int seq_tab_show(struct seq_file *seq, void *v)
+{
+       const struct seq_tab *tb = seq->private;
+
+       return tb->show(seq, v, ((char *)v - tb->data) / tb->width);
+}
+
+static const struct seq_operations seq_tab_ops = {
+       .start = seq_tab_start,
+       .next  = seq_tab_next,
+       .stop  = seq_tab_stop,
+       .show  = seq_tab_show
+};
+
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+                            unsigned int width, unsigned int have_header,
+                            int (*show)(struct seq_file *seq, void *v, int i))
+{
+       struct seq_tab *p;
+
+       p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width);
+       if (p) {
+               p->show = show;
+               p->rows = rows;
+               p->width = width;
+               p->skip_first = have_header != 0;
+       }
+       return p;
+}
+
+/* Trim the size of a seq_tab to the supplied number of rows.  The operation is
+ * irreversible.
+ */
+static int seq_tab_trim(struct seq_tab *p, unsigned int new_rows)
+{
+       if (new_rows > p->rows)
+               return -EINVAL;
+       p->rows = new_rows;
+       return 0;
+}
+
+static int cim_la_show(struct seq_file *seq, void *v, int idx)
+{
+       if (v == SEQ_START_TOKEN)
+               seq_puts(seq, "Status   Data      PC     LS0Stat  LS0Addr "
+                        "            LS0Data\n");
+       else {
+               const u32 *p = v;
+
+               seq_printf(seq,
+                          "  %02x  %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n",
+                          (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
+                          p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
+                          p[6], p[7]);
+       }
+       return 0;
+}
+
+static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "Status   Data      PC\n");
+       } else {
+               const u32 *p = v;
+
+               seq_printf(seq, "  %02x   %08x %08x\n", p[5] & 0xff, p[6],
+                          p[7]);
+               seq_printf(seq, "  %02x   %02x%06x %02x%06x\n",
+                          (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
+                          p[4] & 0xff, p[5] >> 8);
+               seq_printf(seq, "  %02x   %x%07x %x%07x\n", (p[0] >> 4) & 0xff,
+                          p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4);
+       }
+       return 0;
+}
+
+static int cim_la_open(struct inode *inode, struct file *file)
+{
+       int ret;
+       unsigned int cfg;
+       struct seq_tab *p;
+       struct adapter *adap = inode->i_private;
+
+       ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+       if (ret)
+               return ret;
+
+       p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
+                        cfg & UPDBGLACAPTPCONLY_F ?
+                        cim_la_show_3in1 : cim_la_show);
+       if (!p)
+               return -ENOMEM;
+
+       ret = t4_cim_read_la(adap, (u32 *)p->data, NULL);
+       if (ret)
+               seq_release_private(inode, file);
+       return ret;
+}
+
+static const struct file_operations cim_la_fops = {
+       .owner   = THIS_MODULE,
+       .open    = cim_la_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+static int cim_qcfg_show(struct seq_file *seq, void *v)
+{
+       static const char * const qname[] = {
+               "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",
+               "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",
+               "SGE0-RX", "SGE1-RX"
+       };
+
+       int i;
+       struct adapter *adap = seq->private;
+       u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+       u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+       u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))];
+       u16 thres[CIM_NUM_IBQ];
+       u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr;
+       u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5];
+       u32 *p = stat;
+       int cim_num_obq = is_t4(adap->params.chip) ?
+                               CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+       i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A :
+                       UP_IBQ_0_SHADOW_RDADDR_A,
+                       ARRAY_SIZE(stat), stat);
+       if (!i) {
+               if (is_t4(adap->params.chip)) {
+                       i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
+                                       ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
+                               wr = obq_wr_t4;
+               } else {
+                       i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
+                                       ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
+                               wr = obq_wr_t5;
+               }
+       }
+       if (i)
+               return i;
+
+       t4_read_cimq_cfg(adap, base, size, thres);
+
+       seq_printf(seq,
+                  "  Queue  Base  Size Thres  RdPtr WrPtr  SOP  EOP Avail\n");
+       for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
+               seq_printf(seq, "%7s %5x %5u %5u %6x  %4x %4u %4u %5u\n",
+                          qname[i], base[i], size[i], thres[i],
+                          IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]),
+                          QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+                          QUEREMFLITS_G(p[2]) * 16);
+       for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2)
+               seq_printf(seq, "%7s %5x %5u %12x  %4x %4u %4u %5u\n",
+                          qname[i], base[i], size[i],
+                          QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i],
+                          QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
+                          QUEREMFLITS_G(p[2]) * 16);
+       return 0;
+}
+
+static int cim_qcfg_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, cim_qcfg_show, inode->i_private);
+}
+
+static const struct file_operations cim_qcfg_fops = {
+       .owner   = THIS_MODULE,
+       .open    = cim_qcfg_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release,
+};
+
+static int cimq_show(struct seq_file *seq, void *v, int idx)
+{
+       const u32 *p = v;
+
+       seq_printf(seq, "%#06x: %08x %08x %08x %08x\n", idx * 16, p[0], p[1],
+                  p[2], p[3]);
+       return 0;
+}
+
+static int cim_ibq_open(struct inode *inode, struct file *file)
+{
+       int ret;
+       struct seq_tab *p;
+       unsigned int qid = (uintptr_t)inode->i_private & 7;
+       struct adapter *adap = inode->i_private - qid;
+
+       p = seq_open_tab(file, CIM_IBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
+       if (!p)
+               return -ENOMEM;
+
+       ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4);
+       if (ret < 0)
+               seq_release_private(inode, file);
+       else
+               ret = 0;
+       return ret;
+}
+
+static const struct file_operations cim_ibq_fops = {
+       .owner   = THIS_MODULE,
+       .open    = cim_ibq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+static int cim_obq_open(struct inode *inode, struct file *file)
+{
+       int ret;
+       struct seq_tab *p;
+       unsigned int qid = (uintptr_t)inode->i_private & 7;
+       struct adapter *adap = inode->i_private - qid;
+
+       p = seq_open_tab(file, 6 * CIM_OBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
+       if (!p)
+               return -ENOMEM;
+
+       ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4);
+       if (ret < 0) {
+               seq_release_private(inode, file);
+       } else {
+               seq_tab_trim(p, ret / 4);
+               ret = 0;
+       }
+       return ret;
+}
+
+static const struct file_operations cim_obq_fops = {
+       .owner   = THIS_MODULE,
+       .open    = cim_obq_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+struct field_desc {
+       const char *name;
+       unsigned int start;
+       unsigned int width;
+};
+
+static void field_desc_show(struct seq_file *seq, u64 v,
+                           const struct field_desc *p)
+{
+       char buf[32];
+       int line_size = 0;
+
+       while (p->name) {
+               u64 mask = (1ULL << p->width) - 1;
+               int len = scnprintf(buf, sizeof(buf), "%s: %llu", p->name,
+                                   ((unsigned long long)v >> p->start) & mask);
+
+               if (line_size + len >= 79) {
+                       line_size = 8;
+                       seq_puts(seq, "\n        ");
+               }
+               seq_printf(seq, "%s ", buf);
+               line_size += len + 1;
+               p++;
+       }
+       seq_putc(seq, '\n');
+}
+
+static struct field_desc tp_la0[] = {
+       { "RcfOpCodeOut", 60, 4 },
+       { "State", 56, 4 },
+       { "WcfState", 52, 4 },
+       { "RcfOpcSrcOut", 50, 2 },
+       { "CRxError", 49, 1 },
+       { "ERxError", 48, 1 },
+       { "SanityFailed", 47, 1 },
+       { "SpuriousMsg", 46, 1 },
+       { "FlushInputMsg", 45, 1 },
+       { "FlushInputCpl", 44, 1 },
+       { "RssUpBit", 43, 1 },
+       { "RssFilterHit", 42, 1 },
+       { "Tid", 32, 10 },
+       { "InitTcb", 31, 1 },
+       { "LineNumber", 24, 7 },
+       { "Emsg", 23, 1 },
+       { "EdataOut", 22, 1 },
+       { "Cmsg", 21, 1 },
+       { "CdataOut", 20, 1 },
+       { "EreadPdu", 19, 1 },
+       { "CreadPdu", 18, 1 },
+       { "TunnelPkt", 17, 1 },
+       { "RcfPeerFin", 16, 1 },
+       { "RcfReasonOut", 12, 4 },
+       { "TxCchannel", 10, 2 },
+       { "RcfTxChannel", 8, 2 },
+       { "RxEchannel", 6, 2 },
+       { "RcfRxChannel", 5, 1 },
+       { "RcfDataOutSrdy", 4, 1 },
+       { "RxDvld", 3, 1 },
+       { "RxOoDvld", 2, 1 },
+       { "RxCongestion", 1, 1 },
+       { "TxCongestion", 0, 1 },
+       { NULL }
+};
+
+static int tp_la_show(struct seq_file *seq, void *v, int idx)
+{
+       const u64 *p = v;
+
+       field_desc_show(seq, *p, tp_la0);
+       return 0;
+}
+
+static int tp_la_show2(struct seq_file *seq, void *v, int idx)
+{
+       const u64 *p = v;
+
+       if (idx)
+               seq_putc(seq, '\n');
+       field_desc_show(seq, p[0], tp_la0);
+       if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
+               field_desc_show(seq, p[1], tp_la0);
+       return 0;
+}
+
+static int tp_la_show3(struct seq_file *seq, void *v, int idx)
+{
+       static struct field_desc tp_la1[] = {
+               { "CplCmdIn", 56, 8 },
+               { "CplCmdOut", 48, 8 },
+               { "ESynOut", 47, 1 },
+               { "EAckOut", 46, 1 },
+               { "EFinOut", 45, 1 },
+               { "ERstOut", 44, 1 },
+               { "SynIn", 43, 1 },
+               { "AckIn", 42, 1 },
+               { "FinIn", 41, 1 },
+               { "RstIn", 40, 1 },
+               { "DataIn", 39, 1 },
+               { "DataInVld", 38, 1 },
+               { "PadIn", 37, 1 },
+               { "RxBufEmpty", 36, 1 },
+               { "RxDdp", 35, 1 },
+               { "RxFbCongestion", 34, 1 },
+               { "TxFbCongestion", 33, 1 },
+               { "TxPktSumSrdy", 32, 1 },
+               { "RcfUlpType", 28, 4 },
+               { "Eread", 27, 1 },
+               { "Ebypass", 26, 1 },
+               { "Esave", 25, 1 },
+               { "Static0", 24, 1 },
+               { "Cread", 23, 1 },
+               { "Cbypass", 22, 1 },
+               { "Csave", 21, 1 },
+               { "CPktOut", 20, 1 },
+               { "RxPagePoolFull", 18, 2 },
+               { "RxLpbkPkt", 17, 1 },
+               { "TxLpbkPkt", 16, 1 },
+               { "RxVfValid", 15, 1 },
+               { "SynLearned", 14, 1 },
+               { "SetDelEntry", 13, 1 },
+               { "SetInvEntry", 12, 1 },
+               { "CpcmdDvld", 11, 1 },
+               { "CpcmdSave", 10, 1 },
+               { "RxPstructsFull", 8, 2 },
+               { "EpcmdDvld", 7, 1 },
+               { "EpcmdFlush", 6, 1 },
+               { "EpcmdTrimPrefix", 5, 1 },
+               { "EpcmdTrimPostfix", 4, 1 },
+               { "ERssIp4Pkt", 3, 1 },
+               { "ERssIp6Pkt", 2, 1 },
+               { "ERssTcpUdpPkt", 1, 1 },
+               { "ERssFceFipPkt", 0, 1 },
+               { NULL }
+       };
+       static struct field_desc tp_la2[] = {
+               { "CplCmdIn", 56, 8 },
+               { "MpsVfVld", 55, 1 },
+               { "MpsPf", 52, 3 },
+               { "MpsVf", 44, 8 },
+               { "SynIn", 43, 1 },
+               { "AckIn", 42, 1 },
+               { "FinIn", 41, 1 },
+               { "RstIn", 40, 1 },
+               { "DataIn", 39, 1 },
+               { "DataInVld", 38, 1 },
+               { "PadIn", 37, 1 },
+               { "RxBufEmpty", 36, 1 },
+               { "RxDdp", 35, 1 },
+               { "RxFbCongestion", 34, 1 },
+               { "TxFbCongestion", 33, 1 },
+               { "TxPktSumSrdy", 32, 1 },
+               { "RcfUlpType", 28, 4 },
+               { "Eread", 27, 1 },
+               { "Ebypass", 26, 1 },
+               { "Esave", 25, 1 },
+               { "Static0", 24, 1 },
+               { "Cread", 23, 1 },
+               { "Cbypass", 22, 1 },
+               { "Csave", 21, 1 },
+               { "CPktOut", 20, 1 },
+               { "RxPagePoolFull", 18, 2 },
+               { "RxLpbkPkt", 17, 1 },
+               { "TxLpbkPkt", 16, 1 },
+               { "RxVfValid", 15, 1 },
+               { "SynLearned", 14, 1 },
+               { "SetDelEntry", 13, 1 },
+               { "SetInvEntry", 12, 1 },
+               { "CpcmdDvld", 11, 1 },
+               { "CpcmdSave", 10, 1 },
+               { "RxPstructsFull", 8, 2 },
+               { "EpcmdDvld", 7, 1 },
+               { "EpcmdFlush", 6, 1 },
+               { "EpcmdTrimPrefix", 5, 1 },
+               { "EpcmdTrimPostfix", 4, 1 },
+               { "ERssIp4Pkt", 3, 1 },
+               { "ERssIp6Pkt", 2, 1 },
+               { "ERssTcpUdpPkt", 1, 1 },
+               { "ERssFceFipPkt", 0, 1 },
+               { NULL }
+       };
+       const u64 *p = v;
+
+       if (idx)
+               seq_putc(seq, '\n');
+       field_desc_show(seq, p[0], tp_la0);
+       if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
+               field_desc_show(seq, p[1], (p[0] & BIT(17)) ? tp_la2 : tp_la1);
+       return 0;
+}
+
+static int tp_la_open(struct inode *inode, struct file *file)
+{
+       struct seq_tab *p;
+       struct adapter *adap = inode->i_private;
+
+       switch (DBGLAMODE_G(t4_read_reg(adap, TP_DBG_LA_CONFIG_A))) {
+       case 2:
+               p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
+                                tp_la_show2);
+               break;
+       case 3:
+               p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
+                                tp_la_show3);
+               break;
+       default:
+               p = seq_open_tab(file, TPLA_SIZE, sizeof(u64), 0, tp_la_show);
+       }
+       if (!p)
+               return -ENOMEM;
+
+       t4_tp_read_la(adap, (u64 *)p->data, NULL);
+       return 0;
+}
+
+static ssize_t tp_la_write(struct file *file, const char __user *buf,
+                          size_t count, loff_t *pos)
+{
+       int err;
+       char s[32];
+       unsigned long val;
+       size_t size = min(sizeof(s) - 1, count);
+       struct adapter *adap = FILE_DATA(file)->i_private;
+
+       if (copy_from_user(s, buf, size))
+               return -EFAULT;
+       s[size] = '\0';
+       err = kstrtoul(s, 0, &val);
+       if (err)
+               return err;
+       if (val > 0xffff)
+               return -EINVAL;
+       adap->params.tp.la_mask = val << 16;
+       t4_set_reg_field(adap, TP_DBG_LA_CONFIG_A, 0xffff0000U,
+                        adap->params.tp.la_mask);
+       return count;
+}
+
+static const struct file_operations tp_la_fops = {
+       .owner   = THIS_MODULE,
+       .open    = tp_la_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private,
+       .write   = tp_la_write
+};
+
+static int ulprx_la_show(struct seq_file *seq, void *v, int idx)
+{
+       const u32 *p = v;
+
+       if (v == SEQ_START_TOKEN)
+               seq_puts(seq, "      Pcmd        Type   Message"
+                        "                Data\n");
+       else
+               seq_printf(seq, "%08x%08x  %4x  %08x  %08x%08x%08x%08x\n",
+                          p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
+       return 0;
+}
+
+static int ulprx_la_open(struct inode *inode, struct file *file)
+{
+       struct seq_tab *p;
+       struct adapter *adap = inode->i_private;
+
+       p = seq_open_tab(file, ULPRX_LA_SIZE, 8 * sizeof(u32), 1,
+                        ulprx_la_show);
+       if (!p)
+               return -ENOMEM;
+
+       t4_ulprx_read_la(adap, (u32 *)p->data);
+       return 0;
+}
+
+static const struct file_operations ulprx_la_fops = {
+       .owner   = THIS_MODULE,
+       .open    = ulprx_la_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+/* Show the PM memory stats.  These stats include:
+ *
+ * TX:
+ *   Read: memory read operation
+ *   Write Bypass: cut-through
+ *   Bypass + mem: cut-through and save copy
+ *
+ * RX:
+ *   Read: memory read
+ *   Write Bypass: cut-through
+ *   Flush: payload trim or drop
+ */
+static int pm_stats_show(struct seq_file *seq, void *v)
+{
+       static const char * const tx_pm_stats[] = {
+               "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
+       };
+       static const char * const rx_pm_stats[] = {
+               "Read:", "Write bypass:", "Write mem:", "Flush:"
+       };
+
+       int i;
+       u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
+       u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
+       struct adapter *adap = seq->private;
+
+       t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
+       t4_pmrx_get_stats(adap, rx_cnt, rx_cyc);
+
+       seq_printf(seq, "%13s %10s  %20s\n", " ", "Tx pcmds", "Tx bytes");
+       for (i = 0; i < PM_NSTATS - 1; i++)
+               seq_printf(seq, "%-13s %10u  %20llu\n",
+                          tx_pm_stats[i], tx_cnt[i], tx_cyc[i]);
+
+       seq_printf(seq, "%13s %10s  %20s\n", " ", "Rx pcmds", "Rx bytes");
+       for (i = 0; i < PM_NSTATS - 1; i++)
+               seq_printf(seq, "%-13s %10u  %20llu\n",
+                          rx_pm_stats[i], rx_cnt[i], rx_cyc[i]);
+       return 0;
+}
+
+static int pm_stats_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pm_stats_show, inode->i_private);
+}
+
+static ssize_t pm_stats_clear(struct file *file, const char __user *buf,
+                             size_t count, loff_t *pos)
+{
+       struct adapter *adap = FILE_DATA(file)->i_private;
+
+       t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0);
+       t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0);
+       return count;
+}
+
+static const struct file_operations pm_stats_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = pm_stats_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release,
+       .write   = pm_stats_clear
+};
+
+static int cctrl_tbl_show(struct seq_file *seq, void *v)
+{
+       static const char * const dec_fac[] = {
+               "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
+               "0.9375" };
+
+       int i;
+       u16 incr[NMTUS][NCCTRL_WIN];
+       struct adapter *adap = seq->private;
+
+       t4_read_cong_tbl(adap, incr);
+
+       for (i = 0; i < NCCTRL_WIN; ++i) {
+               seq_printf(seq, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
+                          incr[0][i], incr[1][i], incr[2][i], incr[3][i],
+                          incr[4][i], incr[5][i], incr[6][i], incr[7][i]);
+               seq_printf(seq, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
+                          incr[8][i], incr[9][i], incr[10][i], incr[11][i],
+                          incr[12][i], incr[13][i], incr[14][i], incr[15][i],
+                          adap->params.a_wnd[i],
+                          dec_fac[adap->params.b_wnd[i]]);
+       }
+       return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl);
+
+/* Format a value in a unit that differs from the value's native unit by the
+ * given factor.
+ */
+static char *unit_conv(char *buf, size_t len, unsigned int val,
+                      unsigned int factor)
+{
+       unsigned int rem = val % factor;
+
+       if (rem == 0) {
+               snprintf(buf, len, "%u", val / factor);
+       } else {
+               while (rem % 10 == 0)
+                       rem /= 10;
+               snprintf(buf, len, "%u.%u", val / factor, rem);
+       }
+       return buf;
+}
+
+static int clk_show(struct seq_file *seq, void *v)
+{
+       char buf[32];
+       struct adapter *adap = seq->private;
+       unsigned int cclk_ps = 1000000000 / adap->params.vpd.cclk;  /* in ps */
+       u32 res = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
+       unsigned int tre = TIMERRESOLUTION_G(res);
+       unsigned int dack_re = DELAYEDACKRESOLUTION_G(res);
+       unsigned long long tp_tick_us = (cclk_ps << tre) / 1000000; /* in us */
+
+       seq_printf(seq, "Core clock period: %s ns\n",
+                  unit_conv(buf, sizeof(buf), cclk_ps, 1000));
+       seq_printf(seq, "TP timer tick: %s us\n",
+                  unit_conv(buf, sizeof(buf), (cclk_ps << tre), 1000000));
+       seq_printf(seq, "TCP timestamp tick: %s us\n",
+                  unit_conv(buf, sizeof(buf),
+                            (cclk_ps << TIMESTAMPRESOLUTION_G(res)), 1000000));
+       seq_printf(seq, "DACK tick: %s us\n",
+                  unit_conv(buf, sizeof(buf), (cclk_ps << dack_re), 1000000));
+       seq_printf(seq, "DACK timer: %u us\n",
+                  ((cclk_ps << dack_re) / 1000000) *
+                  t4_read_reg(adap, TP_DACK_TIMER_A));
+       seq_printf(seq, "Retransmit min: %llu us\n",
+                  tp_tick_us * t4_read_reg(adap, TP_RXT_MIN_A));
+       seq_printf(seq, "Retransmit max: %llu us\n",
+                  tp_tick_us * t4_read_reg(adap, TP_RXT_MAX_A));
+       seq_printf(seq, "Persist timer min: %llu us\n",
+                  tp_tick_us * t4_read_reg(adap, TP_PERS_MIN_A));
+       seq_printf(seq, "Persist timer max: %llu us\n",
+                  tp_tick_us * t4_read_reg(adap, TP_PERS_MAX_A));
+       seq_printf(seq, "Keepalive idle timer: %llu us\n",
+                  tp_tick_us * t4_read_reg(adap, TP_KEEP_IDLE_A));
+       seq_printf(seq, "Keepalive interval: %llu us\n",
+                  tp_tick_us * t4_read_reg(adap, TP_KEEP_INTVL_A));
+       seq_printf(seq, "Initial SRTT: %llu us\n",
+                  tp_tick_us * INITSRTT_G(t4_read_reg(adap, TP_INIT_SRTT_A)));
+       seq_printf(seq, "FINWAIT2 timer: %llu us\n",
+                  tp_tick_us * t4_read_reg(adap, TP_FINWAIT2_TIMER_A));
+
+       return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(clk);
+
+/* Firmware Device Log dump. */
+static const char * const devlog_level_strings[] = {
+       [FW_DEVLOG_LEVEL_EMERG]         = "EMERG",
+       [FW_DEVLOG_LEVEL_CRIT]          = "CRIT",
+       [FW_DEVLOG_LEVEL_ERR]           = "ERR",
+       [FW_DEVLOG_LEVEL_NOTICE]        = "NOTICE",
+       [FW_DEVLOG_LEVEL_INFO]          = "INFO",
+       [FW_DEVLOG_LEVEL_DEBUG]         = "DEBUG"
+};
+
+static const char * const devlog_facility_strings[] = {
+       [FW_DEVLOG_FACILITY_CORE]       = "CORE",
+       [FW_DEVLOG_FACILITY_SCHED]      = "SCHED",
+       [FW_DEVLOG_FACILITY_TIMER]      = "TIMER",
+       [FW_DEVLOG_FACILITY_RES]        = "RES",
+       [FW_DEVLOG_FACILITY_HW]         = "HW",
+       [FW_DEVLOG_FACILITY_FLR]        = "FLR",
+       [FW_DEVLOG_FACILITY_DMAQ]       = "DMAQ",
+       [FW_DEVLOG_FACILITY_PHY]        = "PHY",
+       [FW_DEVLOG_FACILITY_MAC]        = "MAC",
+       [FW_DEVLOG_FACILITY_PORT]       = "PORT",
+       [FW_DEVLOG_FACILITY_VI]         = "VI",
+       [FW_DEVLOG_FACILITY_FILTER]     = "FILTER",
+       [FW_DEVLOG_FACILITY_ACL]        = "ACL",
+       [FW_DEVLOG_FACILITY_TM]         = "TM",
+       [FW_DEVLOG_FACILITY_QFC]        = "QFC",
+       [FW_DEVLOG_FACILITY_DCB]        = "DCB",
+       [FW_DEVLOG_FACILITY_ETH]        = "ETH",
+       [FW_DEVLOG_FACILITY_OFLD]       = "OFLD",
+       [FW_DEVLOG_FACILITY_RI]         = "RI",
+       [FW_DEVLOG_FACILITY_ISCSI]      = "ISCSI",
+       [FW_DEVLOG_FACILITY_FCOE]       = "FCOE",
+       [FW_DEVLOG_FACILITY_FOISCSI]    = "FOISCSI",
+       [FW_DEVLOG_FACILITY_FOFCOE]     = "FOFCOE"
+};
+
+/* Information gathered by Device Log Open routine for the display routine.
+ */
+struct devlog_info {
+       unsigned int nentries;          /* number of entries in log[] */
+       unsigned int first;             /* first [temporal] entry in log[] */
+       struct fw_devlog_e log[0];      /* Firmware Device Log */
+};
+
+/* Dump a Firmaware Device Log entry.
+ */
+static int devlog_show(struct seq_file *seq, void *v)
+{
+       if (v == SEQ_START_TOKEN)
+               seq_printf(seq, "%10s  %15s  %8s  %8s  %s\n",
+                          "Seq#", "Tstamp", "Level", "Facility", "Message");
+       else {
+               struct devlog_info *dinfo = seq->private;
+               int fidx = (uintptr_t)v - 2;
+               unsigned long index;
+               struct fw_devlog_e *e;
+
+               /* Get a pointer to the log entry to display.  Skip unused log
+                * entries.
+                */
+               index = dinfo->first + fidx;
+               if (index >= dinfo->nentries)
+                       index -= dinfo->nentries;
+               e = &dinfo->log[index];
+               if (e->timestamp == 0)
+                       return 0;
+
+               /* Print the message.  This depends on the firmware using
+                * exactly the same formating strings as the kernel so we may
+                * eventually have to put a format interpreter in here ...
+                */
+               seq_printf(seq, "%10d  %15llu  %8s  %8s  ",
+                          e->seqno, e->timestamp,
+                          (e->level < ARRAY_SIZE(devlog_level_strings)
+                           ? devlog_level_strings[e->level]
+                           : "UNKNOWN"),
+                          (e->facility < ARRAY_SIZE(devlog_facility_strings)
+                           ? devlog_facility_strings[e->facility]
+                           : "UNKNOWN"));
+               seq_printf(seq, e->fmt, e->params[0], e->params[1],
+                          e->params[2], e->params[3], e->params[4],
+                          e->params[5], e->params[6], e->params[7]);
+       }
+       return 0;
+}
+
+/* Sequential File Operations for Device Log.
+ */
+static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos)
+{
+       if (pos > dinfo->nentries)
+               return NULL;
+
+       return (void *)(uintptr_t)(pos + 1);
+}
+
+static void *devlog_start(struct seq_file *seq, loff_t *pos)
+{
+       struct devlog_info *dinfo = seq->private;
+
+       return (*pos
+               ? devlog_get_idx(dinfo, *pos)
+               : SEQ_START_TOKEN);
+}
+
+static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct devlog_info *dinfo = seq->private;
+
+       (*pos)++;
+       return devlog_get_idx(dinfo, *pos);
+}
+
+static void devlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations devlog_seq_ops = {
+       .start = devlog_start,
+       .next  = devlog_next,
+       .stop  = devlog_stop,
+       .show  = devlog_show
+};
+
+/* Set up for reading the firmware's device log.  We read the entire log here
+ * and then display it incrementally in devlog_show().
+ */
+static int devlog_open(struct inode *inode, struct file *file)
+{
+       struct adapter *adap = inode->i_private;
+       struct devlog_params *dparams = &adap->params.devlog;
+       struct devlog_info *dinfo;
+       unsigned int index;
+       u32 fseqno;
+       int ret;
+
+       /* If we don't know where the log is we can't do anything.
+        */
+       if (dparams->start == 0)
+               return -ENXIO;
+
+       /* Allocate the space to read in the firmware's device log and set up
+        * for the iterated call to our display function.
+        */
+       dinfo = __seq_open_private(file, &devlog_seq_ops,
+                                  sizeof(*dinfo) + dparams->size);
+       if (!dinfo)
+               return -ENOMEM;
+
+       /* Record the basic log buffer information and read in the raw log.
+        */
+       dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e));
+       dinfo->first = 0;
+       spin_lock(&adap->win0_lock);
+       ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype,
+                          dparams->start, dparams->size, (__be32 *)dinfo->log,
+                          T4_MEMORY_READ);
+       spin_unlock(&adap->win0_lock);
+       if (ret) {
+               seq_release_private(inode, file);
+               return ret;
+       }
+
+       /* Translate log multi-byte integral elements into host native format
+        * and determine where the first entry in the log is.
+        */
+       for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
+               struct fw_devlog_e *e = &dinfo->log[index];
+               int i;
+               __u32 seqno;
+
+               if (e->timestamp == 0)
+                       continue;
+
+               e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
+               seqno = be32_to_cpu(e->seqno);
+               for (i = 0; i < 8; i++)
+                       e->params[i] =
+                               (__force __be32)be32_to_cpu(e->params[i]);
+
+               if (seqno < fseqno) {
+                       fseqno = seqno;
+                       dinfo->first = index;
+               }
+       }
+       return 0;
+}
+
+static const struct file_operations devlog_fops = {
+       .owner   = THIS_MODULE,
+       .open    = devlog_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+static int mbox_show(struct seq_file *seq, void *v)
+{
+       static const char * const owner[] = { "none", "FW", "driver",
+                                             "unknown" };
+
+       int i;
+       unsigned int mbox = (uintptr_t)seq->private & 7;
+       struct adapter *adap = seq->private - mbox;
+       void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+       unsigned int ctrl_reg = (is_t4(adap->params.chip)
+                                ? CIM_PF_MAILBOX_CTRL_A
+                                : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A);
+       void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
+
+       i = MBOWNER_G(readl(ctrl));
+       seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);
+
+       for (i = 0; i < MBOX_LEN; i += 8)
+               seq_printf(seq, "%016llx\n",
+                          (unsigned long long)readq(addr + i));
+       return 0;
+}
+
+static int mbox_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mbox_show, inode->i_private);
+}
+
+static ssize_t mbox_write(struct file *file, const char __user *buf,
+                         size_t count, loff_t *pos)
+{
+       int i;
+       char c = '\n', s[256];
+       unsigned long long data[8];
+       const struct inode *ino;
+       unsigned int mbox;
+       struct adapter *adap;
+       void __iomem *addr;
+       void __iomem *ctrl;
+
+       if (count > sizeof(s) - 1 || !count)
+               return -EINVAL;
+       if (copy_from_user(s, buf, count))
+               return -EFAULT;
+       s[count] = '\0';
+
+       if (sscanf(s, "%llx %llx %llx %llx %llx %llx %llx %llx%c", &data[0],
+                  &data[1], &data[2], &data[3], &data[4], &data[5], &data[6],
+                  &data[7], &c) < 8 || c != '\n')
+               return -EINVAL;
+
+       ino = FILE_DATA(file);
+       mbox = (uintptr_t)ino->i_private & 7;
+       adap = ino->i_private - mbox;
+       addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+       ctrl = addr + MBOX_LEN;
+
+       if (MBOWNER_G(readl(ctrl)) != X_MBOWNER_PL)
+               return -EBUSY;
+
+       for (i = 0; i < 8; i++)
+               writeq(data[i], addr + 8 * i);
+
+       writel(MBMSGVALID_F | MBOWNER_V(X_MBOWNER_FW), ctrl);
+       return count;
+}
+
+static const struct file_operations mbox_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mbox_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release,
+       .write   = mbox_write
+};
+
+static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
+                         loff_t *ppos)
+{
+       loff_t pos = *ppos;
+       loff_t avail = FILE_DATA(file)->i_size;
+       struct adapter *adap = file->private_data;
+
+       if (pos < 0)
+               return -EINVAL;
+       if (pos >= avail)
+               return 0;
+       if (count > avail - pos)
+               count = avail - pos;
+
+       while (count) {
+               size_t len;
+               int ret, ofst;
+               u8 data[256];
+
+               ofst = pos & 3;
+               len = min(count + ofst, sizeof(data));
+               ret = t4_read_flash(adap, pos - ofst, (len + 3) / 4,
+                                   (u32 *)data, 1);
+               if (ret)
+                       return ret;
+
+               len -= ofst;
+               if (copy_to_user(buf, data + ofst, len))
+                       return -EFAULT;
+
+               buf += len;
+               pos += len;
+               count -= len;
+       }
+       count = pos - *ppos;
+       *ppos = pos;
+       return count;
+}
+
+static const struct file_operations flash_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mem_open,
+       .read    = flash_read,
+};
+
+static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
+{
+       *mask = x | y;
+       y = (__force u64)cpu_to_be64(y);
+       memcpy(addr, (char *)&y + 2, ETH_ALEN);
+}
+
+static int mps_tcam_show(struct seq_file *seq, void *v)
+{
+       if (v == SEQ_START_TOKEN)
+               seq_puts(seq, "Idx  Ethernet address     Mask     Vld Ports PF"
+                        "  VF              Replication             "
+                        "P0 P1 P2 P3  ML\n");
+       else {
+               u64 mask;
+               u8 addr[ETH_ALEN];
+               struct adapter *adap = seq->private;
+               unsigned int idx = (uintptr_t)v - 2;
+               u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+               u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+               u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+               u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
+               u32 rplc[4] = {0, 0, 0, 0};
+
+               if (tcamx & tcamy) {
+                       seq_printf(seq, "%3u         -\n", idx);
+                       goto out;
+               }
+
+               if (cls_lo & REPLICATE_F) {
+                       struct fw_ldst_cmd ldst_cmd;
+                       int ret;
+
+                       memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+                       ldst_cmd.op_to_addrspace =
+                               htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+                                     FW_CMD_REQUEST_F |
+                                     FW_CMD_READ_F |
+                                     FW_LDST_CMD_ADDRSPACE_V(
+                                             FW_LDST_ADDRSPC_MPS));
+                       ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+                       ldst_cmd.u.mps.fid_ctl =
+                               htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
+                                     FW_LDST_CMD_CTL_V(idx));
+                       ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
+                                        sizeof(ldst_cmd), &ldst_cmd);
+                       if (ret)
+                               dev_warn(adap->pdev_dev, "Can't read MPS "
+                                        "replication map for idx %d: %d\n",
+                                        idx, -ret);
+                       else {
+                               rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
+                               rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
+                               rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
+                               rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+                       }
+               }
+
+               tcamxy2valmask(tcamx, tcamy, addr, &mask);
+               seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
+                          "%3c   %#x%4u%4d",
+                          idx, addr[0], addr[1], addr[2], addr[3], addr[4],
+                          addr[5], (unsigned long long)mask,
+                          (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
+                          PF_G(cls_lo),
+                          (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+               if (cls_lo & REPLICATE_F)
+                       seq_printf(seq, " %08x %08x %08x %08x",
+                                  rplc[3], rplc[2], rplc[1], rplc[0]);
+               else
+                       seq_printf(seq, "%36c", ' ');
+               seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+                          SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+                          SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+                          (cls_lo >> MULTILISTEN0_S) & 0xf);
+       }
+out:   return 0;
+}
+
+static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos)
+{
+       struct adapter *adap = seq->private;
+       int max_mac_addr = is_t4(adap->params.chip) ?
+                               NUM_MPS_CLS_SRAM_L_INSTANCES :
+                               NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+       return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mps_tcam_start(struct seq_file *seq, loff_t *pos)
+{
+       return *pos ? mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       ++*pos;
+       return mps_tcam_get_idx(seq, *pos);
+}
+
+static void mps_tcam_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mps_tcam_seq_ops = {
+       .start = mps_tcam_start,
+       .next  = mps_tcam_next,
+       .stop  = mps_tcam_stop,
+       .show  = mps_tcam_show
+};
+
+static int mps_tcam_open(struct inode *inode, struct file *file)
+{
+       int res = seq_open(file, &mps_tcam_seq_ops);
+
+       if (!res) {
+               struct seq_file *seq = file->private_data;
+
+               seq->private = inode->i_private;
+       }
+       return res;
+}
+
+static const struct file_operations mps_tcam_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mps_tcam_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
+/* Display various sensor information.
+ */
+static int sensors_show(struct seq_file *seq, void *v)
+{
+       struct adapter *adap = seq->private;
+       u32 param[7], val[7];
+       int ret;
+
+       /* Note that if the sensors haven't been initialized and turned on
+        * we'll get values of 0, so treat those as "<unknown>" ...
+        */
+       param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+                   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+                   FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP));
+       param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+                   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+                   FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+                             param, val);
+
+       if (ret < 0 || val[0] == 0)
+               seq_puts(seq, "Temperature: <unknown>\n");
+       else
+               seq_printf(seq, "Temperature: %dC\n", val[0]);
+
+       if (ret < 0 || val[1] == 0)
+               seq_puts(seq, "Core VDD:    <unknown>\n");
+       else
+               seq_printf(seq, "Core VDD:    %dmV\n", val[1]);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(sensors);
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int clip_tbl_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, clip_tbl_show, inode->i_private);
+}
+
+static const struct file_operations clip_tbl_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = clip_tbl_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release
+};
+#endif
+
+/*RSS Table.
+ */
+
+static int rss_show(struct seq_file *seq, void *v, int idx)
+{
+       u16 *entry = v;
+
+       seq_printf(seq, "%4d:  %4u  %4u  %4u  %4u  %4u  %4u  %4u  %4u\n",
+                  idx * 8, entry[0], entry[1], entry[2], entry[3], entry[4],
+                  entry[5], entry[6], entry[7]);
+       return 0;
+}
+
+static int rss_open(struct inode *inode, struct file *file)
+{
+       int ret;
+       struct seq_tab *p;
+       struct adapter *adap = inode->i_private;
+
+       p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+       if (!p)
+               return -ENOMEM;
+
+       ret = t4_read_rss(adap, (u16 *)p->data);
+       if (ret)
+               seq_release_private(inode, file);
+
+       return ret;
+}
+
+static const struct file_operations rss_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = rss_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+/* RSS Configuration.
+ */
+
+/* Small utility function to return the strings "yes" or "no" if the supplied
+ * argument is non-zero.
+ */
+static const char *yesno(int x)
+{
+       static const char *yes = "yes";
+       static const char *no = "no";
+
+       return x ? yes : no;
+}
+
+static int rss_config_show(struct seq_file *seq, void *v)
+{
+       struct adapter *adapter = seq->private;
+       static const char * const keymode[] = {
+               "global",
+               "global and per-VF scramble",
+               "per-PF and per-VF scramble",
+               "per-VF and per-VF scramble",
+       };
+       u32 rssconf;
+
+       rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_A);
+       seq_printf(seq, "TP_RSS_CONFIG: %#x\n", rssconf);
+       seq_printf(seq, "  Tnl4TupEnIpv6: %3s\n", yesno(rssconf &
+                                                       TNL4TUPENIPV6_F));
+       seq_printf(seq, "  Tnl2TupEnIpv6: %3s\n", yesno(rssconf &
+                                                       TNL2TUPENIPV6_F));
+       seq_printf(seq, "  Tnl4TupEnIpv4: %3s\n", yesno(rssconf &
+                                                       TNL4TUPENIPV4_F));
+       seq_printf(seq, "  Tnl2TupEnIpv4: %3s\n", yesno(rssconf &
+                                                       TNL2TUPENIPV4_F));
+       seq_printf(seq, "  TnlTcpSel:     %3s\n", yesno(rssconf & TNLTCPSEL_F));
+       seq_printf(seq, "  TnlIp6Sel:     %3s\n", yesno(rssconf & TNLIP6SEL_F));
+       seq_printf(seq, "  TnlVrtSel:     %3s\n", yesno(rssconf & TNLVRTSEL_F));
+       seq_printf(seq, "  TnlMapEn:      %3s\n", yesno(rssconf & TNLMAPEN_F));
+       seq_printf(seq, "  OfdHashSave:   %3s\n", yesno(rssconf &
+                                                       OFDHASHSAVE_F));
+       seq_printf(seq, "  OfdVrtSel:     %3s\n", yesno(rssconf & OFDVRTSEL_F));
+       seq_printf(seq, "  OfdMapEn:      %3s\n", yesno(rssconf & OFDMAPEN_F));
+       seq_printf(seq, "  OfdLkpEn:      %3s\n", yesno(rssconf & OFDLKPEN_F));
+       seq_printf(seq, "  Syn4TupEnIpv6: %3s\n", yesno(rssconf &
+                                                       SYN4TUPENIPV6_F));
+       seq_printf(seq, "  Syn2TupEnIpv6: %3s\n", yesno(rssconf &
+                                                       SYN2TUPENIPV6_F));
+       seq_printf(seq, "  Syn4TupEnIpv4: %3s\n", yesno(rssconf &
+                                                       SYN4TUPENIPV4_F));
+       seq_printf(seq, "  Syn2TupEnIpv4: %3s\n", yesno(rssconf &
+                                                       SYN2TUPENIPV4_F));
+       seq_printf(seq, "  Syn4TupEnIpv6: %3s\n", yesno(rssconf &
+                                                       SYN4TUPENIPV6_F));
+       seq_printf(seq, "  SynIp6Sel:     %3s\n", yesno(rssconf & SYNIP6SEL_F));
+       seq_printf(seq, "  SynVrt6Sel:    %3s\n", yesno(rssconf & SYNVRTSEL_F));
+       seq_printf(seq, "  SynMapEn:      %3s\n", yesno(rssconf & SYNMAPEN_F));
+       seq_printf(seq, "  SynLkpEn:      %3s\n", yesno(rssconf & SYNLKPEN_F));
+       seq_printf(seq, "  ChnEn:         %3s\n", yesno(rssconf &
+                                                       CHANNELENABLE_F));
+       seq_printf(seq, "  PrtEn:         %3s\n", yesno(rssconf &
+                                                       PORTENABLE_F));
+       seq_printf(seq, "  TnlAllLkp:     %3s\n", yesno(rssconf &
+                                                       TNLALLLOOKUP_F));
+       seq_printf(seq, "  VrtEn:         %3s\n", yesno(rssconf &
+                                                       VIRTENABLE_F));
+       seq_printf(seq, "  CngEn:         %3s\n", yesno(rssconf &
+                                                       CONGESTIONENABLE_F));
+       seq_printf(seq, "  HashToeplitz:  %3s\n", yesno(rssconf &
+                                                       HASHTOEPLITZ_F));
+       seq_printf(seq, "  Udp4En:        %3s\n", yesno(rssconf & UDPENABLE_F));
+       seq_printf(seq, "  Disable:       %3s\n", yesno(rssconf & DISABLE_F));
+
+       seq_puts(seq, "\n");
+
+       rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_TNL_A);
+       seq_printf(seq, "TP_RSS_CONFIG_TNL: %#x\n", rssconf);
+       seq_printf(seq, "  MaskSize:      %3d\n", MASKSIZE_G(rssconf));
+       seq_printf(seq, "  MaskFilter:    %3d\n", MASKFILTER_G(rssconf));
+       if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
+               seq_printf(seq, "  HashAll:     %3s\n",
+                          yesno(rssconf & HASHALL_F));
+               seq_printf(seq, "  HashEth:     %3s\n",
+                          yesno(rssconf & HASHETH_F));
+       }
+       seq_printf(seq, "  UseWireCh:     %3s\n", yesno(rssconf & USEWIRECH_F));
+
+       seq_puts(seq, "\n");
+
+       rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_OFD_A);
+       seq_printf(seq, "TP_RSS_CONFIG_OFD: %#x\n", rssconf);
+       seq_printf(seq, "  MaskSize:      %3d\n", MASKSIZE_G(rssconf));
+       seq_printf(seq, "  RRCplMapEn:    %3s\n", yesno(rssconf &
+                                                       RRCPLMAPEN_F));
+       seq_printf(seq, "  RRCplQueWidth: %3d\n", RRCPLQUEWIDTH_G(rssconf));
+
+       seq_puts(seq, "\n");
+
+       rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_SYN_A);
+       seq_printf(seq, "TP_RSS_CONFIG_SYN: %#x\n", rssconf);
+       seq_printf(seq, "  MaskSize:      %3d\n", MASKSIZE_G(rssconf));
+       seq_printf(seq, "  UseWireCh:     %3s\n", yesno(rssconf & USEWIRECH_F));
+
+       seq_puts(seq, "\n");
+
+       rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
+       seq_printf(seq, "TP_RSS_CONFIG_VRT: %#x\n", rssconf);
+       if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
+               seq_printf(seq, "  KeyWrAddrX:     %3d\n",
+                          KEYWRADDRX_G(rssconf));
+               seq_printf(seq, "  KeyExtend:      %3s\n",
+                          yesno(rssconf & KEYEXTEND_F));
+       }
+       seq_printf(seq, "  VfRdRg:        %3s\n", yesno(rssconf & VFRDRG_F));
+       seq_printf(seq, "  VfRdEn:        %3s\n", yesno(rssconf & VFRDEN_F));
+       seq_printf(seq, "  VfPerrEn:      %3s\n", yesno(rssconf & VFPERREN_F));
+       seq_printf(seq, "  KeyPerrEn:     %3s\n", yesno(rssconf & KEYPERREN_F));
+       seq_printf(seq, "  DisVfVlan:     %3s\n", yesno(rssconf &
+                                                       DISABLEVLAN_F));
+       seq_printf(seq, "  EnUpSwt:       %3s\n", yesno(rssconf & ENABLEUP0_F));
+       seq_printf(seq, "  HashDelay:     %3d\n", HASHDELAY_G(rssconf));
+       if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+               seq_printf(seq, "  VfWrAddr:      %3d\n", VFWRADDR_G(rssconf));
+       seq_printf(seq, "  KeyMode:       %s\n", keymode[KEYMODE_G(rssconf)]);
+       seq_printf(seq, "  VfWrEn:        %3s\n", yesno(rssconf & VFWREN_F));
+       seq_printf(seq, "  KeyWrEn:       %3s\n", yesno(rssconf & KEYWREN_F));
+       seq_printf(seq, "  KeyWrAddr:     %3d\n", KEYWRADDR_G(rssconf));
+
+       seq_puts(seq, "\n");
+
+       rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_CNG_A);
+       seq_printf(seq, "TP_RSS_CONFIG_CNG: %#x\n", rssconf);
+       seq_printf(seq, "  ChnCount3:     %3s\n", yesno(rssconf & CHNCOUNT3_F));
+       seq_printf(seq, "  ChnCount2:     %3s\n", yesno(rssconf & CHNCOUNT2_F));
+       seq_printf(seq, "  ChnCount1:     %3s\n", yesno(rssconf & CHNCOUNT1_F));
+       seq_printf(seq, "  ChnCount0:     %3s\n", yesno(rssconf & CHNCOUNT0_F));
+       seq_printf(seq, "  ChnUndFlow3:   %3s\n", yesno(rssconf &
+                                                       CHNUNDFLOW3_F));
+       seq_printf(seq, "  ChnUndFlow2:   %3s\n", yesno(rssconf &
+                                                       CHNUNDFLOW2_F));
+       seq_printf(seq, "  ChnUndFlow1:   %3s\n", yesno(rssconf &
+                                                       CHNUNDFLOW1_F));
+       seq_printf(seq, "  ChnUndFlow0:   %3s\n", yesno(rssconf &
+                                                       CHNUNDFLOW0_F));
+       seq_printf(seq, "  RstChn3:       %3s\n", yesno(rssconf & RSTCHN3_F));
+       seq_printf(seq, "  RstChn2:       %3s\n", yesno(rssconf & RSTCHN2_F));
+       seq_printf(seq, "  RstChn1:       %3s\n", yesno(rssconf & RSTCHN1_F));
+       seq_printf(seq, "  RstChn0:       %3s\n", yesno(rssconf & RSTCHN0_F));
+       seq_printf(seq, "  UpdVld:        %3s\n", yesno(rssconf & UPDVLD_F));
+       seq_printf(seq, "  Xoff:          %3s\n", yesno(rssconf & XOFF_F));
+       seq_printf(seq, "  UpdChn3:       %3s\n", yesno(rssconf & UPDCHN3_F));
+       seq_printf(seq, "  UpdChn2:       %3s\n", yesno(rssconf & UPDCHN2_F));
+       seq_printf(seq, "  UpdChn1:       %3s\n", yesno(rssconf & UPDCHN1_F));
+       seq_printf(seq, "  UpdChn0:       %3s\n", yesno(rssconf & UPDCHN0_F));
+       seq_printf(seq, "  Queue:         %3d\n", QUEUE_G(rssconf));
+
+       return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(rss_config);
+
+/* RSS Secret Key.
+ */
+
+static int rss_key_show(struct seq_file *seq, void *v)
+{
+       u32 key[10];
+
+       t4_read_rss_key(seq->private, key);
+       seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+                  key[9], key[8], key[7], key[6], key[5], key[4], key[3],
+                  key[2], key[1], key[0]);
+       return 0;
+}
+
+static int rss_key_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, rss_key_show, inode->i_private);
+}
+
+static ssize_t rss_key_write(struct file *file, const char __user *buf,
+                            size_t count, loff_t *pos)
+{
+       int i, j;
+       u32 key[10];
+       char s[100], *p;
+       struct adapter *adap = FILE_DATA(file)->i_private;
+
+       if (count > sizeof(s) - 1)
+               return -EINVAL;
+       if (copy_from_user(s, buf, count))
+               return -EFAULT;
+       for (i = count; i > 0 && isspace(s[i - 1]); i--)
+               ;
+       s[i] = '\0';
+
+       for (p = s, i = 9; i >= 0; i--) {
+               key[i] = 0;
+               for (j = 0; j < 8; j++, p++) {
+                       if (!isxdigit(*p))
+                               return -EINVAL;
+                       key[i] = (key[i] << 4) | hex2val(*p);
+               }
+       }
+
+       t4_write_rss_key(adap, key, -1);
+       return count;
+}
+
+static const struct file_operations rss_key_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = rss_key_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release,
+       .write   = rss_key_write
+};
+
+/* PF RSS Configuration.
+ */
+
+struct rss_pf_conf {
+       u32 rss_pf_map;
+       u32 rss_pf_mask;
+       u32 rss_pf_config;
+};
+
+static int rss_pf_config_show(struct seq_file *seq, void *v, int idx)
+{
+       struct rss_pf_conf *pfconf;
+
+       if (v == SEQ_START_TOKEN) {
+               /* use the 0th entry to dump the PF Map Index Size */
+               pfconf = seq->private + offsetof(struct seq_tab, data);
+               seq_printf(seq, "PF Map Index Size = %d\n\n",
+                          LKPIDXSIZE_G(pfconf->rss_pf_map));
+
+               seq_puts(seq, "     RSS              PF   VF    Hash Tuple Enable         Default\n");
+               seq_puts(seq, "     Enable       IPF Mask Mask  IPv6      IPv4      UDP   Queue\n");
+               seq_puts(seq, " PF  Map Chn Prt  Map Size Size  Four Two  Four Two  Four  Ch1  Ch0\n");
+       } else {
+               #define G_PFnLKPIDX(map, n) \
+                       (((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M)
+               #define G_PFnMSKSIZE(mask, n) \
+                       (((mask) >> PF1MSKSIZE_S*(n)) & PF1MSKSIZE_M)
+
+               pfconf = v;
+               seq_printf(seq, "%3d  %3s %3s %3s  %3d  %3d  %3d   %3s %3s   %3s %3s   %3s  %3d  %3d\n",
+                          idx,
+                          yesno(pfconf->rss_pf_config & MAPENABLE_F),
+                          yesno(pfconf->rss_pf_config & CHNENABLE_F),
+                          yesno(pfconf->rss_pf_config & PRTENABLE_F),
+                          G_PFnLKPIDX(pfconf->rss_pf_map, idx),
+                          G_PFnMSKSIZE(pfconf->rss_pf_mask, idx),
+                          IVFWIDTH_G(pfconf->rss_pf_config),
+                          yesno(pfconf->rss_pf_config & IP6FOURTUPEN_F),
+                          yesno(pfconf->rss_pf_config & IP6TWOTUPEN_F),
+                          yesno(pfconf->rss_pf_config & IP4FOURTUPEN_F),
+                          yesno(pfconf->rss_pf_config & IP4TWOTUPEN_F),
+                          yesno(pfconf->rss_pf_config & UDPFOURTUPEN_F),
+                          CH1DEFAULTQUEUE_G(pfconf->rss_pf_config),
+                          CH0DEFAULTQUEUE_G(pfconf->rss_pf_config));
+
+               #undef G_PFnLKPIDX
+               #undef G_PFnMSKSIZE
+       }
+       return 0;
+}
+
+static int rss_pf_config_open(struct inode *inode, struct file *file)
+{
+       struct adapter *adapter = inode->i_private;
+       struct seq_tab *p;
+       u32 rss_pf_map, rss_pf_mask;
+       struct rss_pf_conf *pfconf;
+       int pf;
+
+       p = seq_open_tab(file, 8, sizeof(*pfconf), 1, rss_pf_config_show);
+       if (!p)
+               return -ENOMEM;
+
+       pfconf = (struct rss_pf_conf *)p->data;
+       rss_pf_map = t4_read_rss_pf_map(adapter);
+       rss_pf_mask = t4_read_rss_pf_mask(adapter);
+       for (pf = 0; pf < 8; pf++) {
+               pfconf[pf].rss_pf_map = rss_pf_map;
+               pfconf[pf].rss_pf_mask = rss_pf_mask;
+               t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config);
+       }
+       return 0;
+}
+
+static const struct file_operations rss_pf_config_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = rss_pf_config_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+/* VF RSS Configuration.
+ */
+
+struct rss_vf_conf {
+       u32 rss_vf_vfl;
+       u32 rss_vf_vfh;
+};
+
+static int rss_vf_config_show(struct seq_file *seq, void *v, int idx)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "     RSS                     Hash Tuple Enable\n");
+               seq_puts(seq, "     Enable   IVF  Dis  Enb  IPv6      IPv4      UDP    Def  Secret Key\n");
+               seq_puts(seq, " VF  Chn Prt  Map  VLAN  uP  Four Two  Four Two  Four   Que  Idx       Hash\n");
+       } else {
+               struct rss_vf_conf *vfconf = v;
+
+               seq_printf(seq, "%3d  %3s %3s  %3d   %3s %3s   %3s %3s   %3s  %3s   %3s  %4d  %3d %#10x\n",
+                          idx,
+                          yesno(vfconf->rss_vf_vfh & VFCHNEN_F),
+                          yesno(vfconf->rss_vf_vfh & VFPRTEN_F),
+                          VFLKPIDX_G(vfconf->rss_vf_vfh),
+                          yesno(vfconf->rss_vf_vfh & VFVLNEX_F),
+                          yesno(vfconf->rss_vf_vfh & VFUPEN_F),
+                          yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
+                          yesno(vfconf->rss_vf_vfh & VFIP6TWOTUPEN_F),
+                          yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
+                          yesno(vfconf->rss_vf_vfh & VFIP4TWOTUPEN_F),
+                          yesno(vfconf->rss_vf_vfh & ENABLEUDPHASH_F),
+                          DEFAULTQUEUE_G(vfconf->rss_vf_vfh),
+                          KEYINDEX_G(vfconf->rss_vf_vfh),
+                          vfconf->rss_vf_vfl);
+       }
+       return 0;
+}
+
+static int rss_vf_config_open(struct inode *inode, struct file *file)
+{
+       struct adapter *adapter = inode->i_private;
+       struct seq_tab *p;
+       struct rss_vf_conf *vfconf;
+       int vf;
+
+       p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
+       if (!p)
+               return -ENOMEM;
+
+       vfconf = (struct rss_vf_conf *)p->data;
+       for (vf = 0; vf < 128; vf++) {
+               t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
+                                     &vfconf[vf].rss_vf_vfh);
+       }
+       return 0;
+}
+
+static const struct file_operations rss_vf_config_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = rss_vf_config_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release_private
+};
+
+/**
+ * ethqset2pinfo - return port_info of an Ethernet Queue Set
+ * @adap: the adapter
+ * @qset: Ethernet Queue Set
+ */
+static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
+{
+       int pidx;
+
+       for_each_port(adap, pidx) {
+               struct port_info *pi = adap2pinfo(adap, pidx);
+
+               if (qset >= pi->first_qset &&
+                   qset < pi->first_qset + pi->nqsets)
+                       return pi;
+       }
+
+       /* should never happen! */
+       BUG_ON(1);
+       return NULL;
+}
+
+static int sge_qinfo_show(struct seq_file *seq, void *v)
+{
+       struct adapter *adap = seq->private;
+       int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
+       int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+       int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
+       int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
+       int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+       int i, r = (uintptr_t)v - 1;
+       int toe_idx = r - eth_entries;
+       int rdma_idx = toe_idx - toe_entries;
+       int ciq_idx = rdma_idx - rdma_entries;
+       int ctrl_idx =  ciq_idx - ciq_entries;
+       int fq_idx =  ctrl_idx - ctrl_entries;
+
+       if (r)
+               seq_putc(seq, '\n');
+
+#define S3(fmt_spec, s, v) \
+do { \
+       seq_printf(seq, "%-12s", s); \
+       for (i = 0; i < n; ++i) \
+               seq_printf(seq, " %16" fmt_spec, v); \
+               seq_putc(seq, '\n'); \
+} while (0)
+#define S(s, v) S3("s", s, v)
+#define T(s, v) S3("u", s, tx[i].v)
+#define R(s, v) S3("u", s, rx[i].v)
+
+       if (r < eth_entries) {
+               int base_qset = r * 4;
+               const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
+               const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
+               int n = min(4, adap->sge.ethqsets - 4 * r);
+
+               S("QType:", "Ethernet");
+               S("Interface:",
+                 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+               T("TxQ ID:", q.cntxt_id);
+               T("TxQ size:", q.size);
+               T("TxQ inuse:", q.in_use);
+               T("TxQ CIDX:", q.cidx);
+               T("TxQ PIDX:", q.pidx);
+#ifdef CONFIG_CHELSIO_T4_DCB
+               T("DCB Prio:", dcb_prio);
+               S3("u", "DCB PGID:",
+                  (ethqset2pinfo(adap, base_qset + i)->dcb.pgid >>
+                   4*(7-tx[i].dcb_prio)) & 0xf);
+               S3("u", "DCB PFC:",
+                  (ethqset2pinfo(adap, base_qset + i)->dcb.pfcen >>
+                   1*(7-tx[i].dcb_prio)) & 0x1);
+#endif
+               R("RspQ ID:", rspq.abs_id);
+               R("RspQ size:", rspq.size);
+               R("RspQE size:", rspq.iqe_len);
+               R("RspQ CIDX:", rspq.cidx);
+               R("RspQ Gen:", rspq.gen);
+               S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+               S3("u", "Intr pktcnt:",
+                  adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+               R("FL ID:", fl.cntxt_id);
+               R("FL size:", fl.size - 8);
+               R("FL pend:", fl.pend_cred);
+               R("FL avail:", fl.avail);
+               R("FL PIDX:", fl.pidx);
+               R("FL CIDX:", fl.cidx);
+       } else if (toe_idx < toe_entries) {
+               const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
+               const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
+               int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+
+               S("QType:", "TOE");
+               T("TxQ ID:", q.cntxt_id);
+               T("TxQ size:", q.size);
+               T("TxQ inuse:", q.in_use);
+               T("TxQ CIDX:", q.cidx);
+               T("TxQ PIDX:", q.pidx);
+               R("RspQ ID:", rspq.abs_id);
+               R("RspQ size:", rspq.size);
+               R("RspQE size:", rspq.iqe_len);
+               R("RspQ CIDX:", rspq.cidx);
+               R("RspQ Gen:", rspq.gen);
+               S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+               S3("u", "Intr pktcnt:",
+                  adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+               R("FL ID:", fl.cntxt_id);
+               R("FL size:", fl.size - 8);
+               R("FL pend:", fl.pend_cred);
+               R("FL avail:", fl.avail);
+               R("FL PIDX:", fl.pidx);
+               R("FL CIDX:", fl.cidx);
+       } else if (rdma_idx < rdma_entries) {
+               const struct sge_ofld_rxq *rx =
+                               &adap->sge.rdmarxq[rdma_idx * 4];
+               int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
+
+               S("QType:", "RDMA-CPL");
+               R("RspQ ID:", rspq.abs_id);
+               R("RspQ size:", rspq.size);
+               R("RspQE size:", rspq.iqe_len);
+               R("RspQ CIDX:", rspq.cidx);
+               R("RspQ Gen:", rspq.gen);
+               S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+               S3("u", "Intr pktcnt:",
+                  adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+               R("FL ID:", fl.cntxt_id);
+               R("FL size:", fl.size - 8);
+               R("FL pend:", fl.pend_cred);
+               R("FL avail:", fl.avail);
+               R("FL PIDX:", fl.pidx);
+               R("FL CIDX:", fl.cidx);
+       } else if (ciq_idx < ciq_entries) {
+               const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
+               int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
+
+               S("QType:", "RDMA-CIQ");
+               R("RspQ ID:", rspq.abs_id);
+               R("RspQ size:", rspq.size);
+               R("RspQE size:", rspq.iqe_len);
+               R("RspQ CIDX:", rspq.cidx);
+               R("RspQ Gen:", rspq.gen);
+               S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+               S3("u", "Intr pktcnt:",
+                  adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+       } else if (ctrl_idx < ctrl_entries) {
+               const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
+               int n = min(4, adap->params.nports - 4 * ctrl_idx);
+
+               S("QType:", "Control");
+               T("TxQ ID:", q.cntxt_id);
+               T("TxQ size:", q.size);
+               T("TxQ inuse:", q.in_use);
+               T("TxQ CIDX:", q.cidx);
+               T("TxQ PIDX:", q.pidx);
+       } else if (fq_idx == 0) {
+               const struct sge_rspq *evtq = &adap->sge.fw_evtq;
+
+               seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
+               seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
+               seq_printf(seq, "%-12s %16u\n", "RspQ size:", evtq->size);
+               seq_printf(seq, "%-12s %16u\n", "RspQE size:", evtq->iqe_len);
+               seq_printf(seq, "%-12s %16u\n", "RspQ CIDX:", evtq->cidx);
+               seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
+               seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+                          qtimer_val(adap, evtq));
+               seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+                          adap->sge.counter_val[evtq->pktcnt_idx]);
+       }
+#undef R
+#undef T
+#undef S
+#undef S3
+return 0;
+}
+
+static int sge_queue_entries(const struct adapter *adap)
+{
+       return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
+              DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+              DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
+              DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
+              DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
+}
+
+static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
+{
+       int entries = sge_queue_entries(seq->private);
+
+       return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_queue_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       int entries = sge_queue_entries(seq->private);
+
+       ++*pos;
+       return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qinfo_seq_ops = {
+       .start = sge_queue_start,
+       .next  = sge_queue_next,
+       .stop  = sge_queue_stop,
+       .show  = sge_qinfo_show
+};
+
+static int sge_qinfo_open(struct inode *inode, struct file *file)
+{
+       int res = seq_open(file, &sge_qinfo_seq_ops);
+
+       if (!res) {
+               struct seq_file *seq = file->private_data;
+
+               seq->private = inode->i_private;
+       }
+       return res;
+}
+
+static const struct file_operations sge_qinfo_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = sge_qinfo_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
+int mem_open(struct inode *inode, struct file *file)
+{
+       unsigned int mem;
+       struct adapter *adap;
+
+       file->private_data = inode->i_private;
+
+       mem = (uintptr_t)file->private_data & 0x3;
+       adap = file->private_data - mem;
+
+       (void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);
+
+       return 0;
+}
+
 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
                        loff_t *ppos)
 {
@@ -80,7 +1934,6 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
        *ppos = pos + count;
        return count;
 }
-
 static const struct file_operations mem_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = simple_open,
@@ -88,6 +1941,12 @@ static const struct file_operations mem_debugfs_fops = {
        .llseek  = default_llseek,
 };
 
+static void set_debugfs_file_size(struct dentry *de, loff_t size)
+{
+       if (!IS_ERR(de) && de->d_inode)
+               de->d_inode->i_size = size;
+}
+
 static void add_debugfs_mem(struct adapter *adap, const char *name,
                            unsigned int idx, unsigned int size_mb)
 {
@@ -119,14 +1978,65 @@ int t4_setup_debugfs(struct adapter *adap)
 {
        int i;
        u32 size;
+       struct dentry *de;
 
        static struct t4_debugfs_entry t4_debugfs_files[] = {
+               { "cim_la", &cim_la_fops, S_IRUSR, 0 },
+               { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
+               { "clk", &clk_debugfs_fops, S_IRUSR, 0 },
+               { "devlog", &devlog_fops, S_IRUSR, 0 },
+               { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
+               { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
+               { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
+               { "mbox3", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
+               { "mbox4", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 4 },
+               { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
+               { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
+               { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
                { "l2t", &t4_l2t_fops, S_IRUSR, 0},
+               { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
+               { "rss", &rss_debugfs_fops, S_IRUSR, 0 },
+               { "rss_config", &rss_config_debugfs_fops, S_IRUSR, 0 },
+               { "rss_key", &rss_key_debugfs_fops, S_IRUSR, 0 },
+               { "rss_pf_config", &rss_pf_config_debugfs_fops, S_IRUSR, 0 },
+               { "rss_vf_config", &rss_vf_config_debugfs_fops, S_IRUSR, 0 },
+               { "sge_qinfo", &sge_qinfo_debugfs_fops, S_IRUSR, 0 },
+               { "ibq_tp0",  &cim_ibq_fops, S_IRUSR, 0 },
+               { "ibq_tp1",  &cim_ibq_fops, S_IRUSR, 1 },
+               { "ibq_ulp",  &cim_ibq_fops, S_IRUSR, 2 },
+               { "ibq_sge0", &cim_ibq_fops, S_IRUSR, 3 },
+               { "ibq_sge1", &cim_ibq_fops, S_IRUSR, 4 },
+               { "ibq_ncsi", &cim_ibq_fops, S_IRUSR, 5 },
+               { "obq_ulp0", &cim_obq_fops, S_IRUSR, 0 },
+               { "obq_ulp1", &cim_obq_fops, S_IRUSR, 1 },
+               { "obq_ulp2", &cim_obq_fops, S_IRUSR, 2 },
+               { "obq_ulp3", &cim_obq_fops, S_IRUSR, 3 },
+               { "obq_sge",  &cim_obq_fops, S_IRUSR, 4 },
+               { "obq_ncsi", &cim_obq_fops, S_IRUSR, 5 },
+               { "tp_la", &tp_la_fops, S_IRUSR, 0 },
+               { "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
+               { "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
+               { "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
+               { "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
+#if IS_ENABLED(CONFIG_IPV6)
+               { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
+#endif
+       };
+
+       /* Debug FS nodes common to all T5 and later adapters.
+        */
+       static struct t4_debugfs_entry t5_debugfs_files[] = {
+               { "obq_sge_rx_q0", &cim_obq_fops, S_IRUSR, 6 },
+               { "obq_sge_rx_q1", &cim_obq_fops, S_IRUSR, 7 },
        };
 
        add_debugfs_files(adap,
                          t4_debugfs_files,
                          ARRAY_SIZE(t4_debugfs_files));
+       if (!is_t4(adap->params.chip))
+               add_debugfs_files(adap,
+                                 t5_debugfs_files,
+                                 ARRAY_SIZE(t5_debugfs_files));
 
        i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
        if (i & EDRAM0_ENABLE_F) {
@@ -154,5 +2064,10 @@ int t4_setup_debugfs(struct adapter *adap)
                                        EXT_MEM1_SIZE_G(size));
                }
        }
+
+       de = debugfs_create_file("flash", S_IRUSR, adap->debugfs_root, adap,
+                                &flash_debugfs_fops);
+       set_debugfs_file_size(de, adap->params.sf_size);
+
        return 0;
 }
index a3d8867efd3d7aff3e59c0cac6c71a7561cd1364..b63cfee2d96393b3fb4685adbd0afa817d41f226 100644 (file)
 
 #include <linux/export.h>
 
+#define FILE_DATA(_file) ((_file)->f_path.dentry->d_inode)
+
+#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+       return single_open(file, name##_show, inode->i_private); \
+} \
+static const struct file_operations name##_debugfs_fops = { \
+       .owner   = THIS_MODULE, \
+       .open    = name##_open, \
+       .read    = seq_read, \
+       .llseek  = seq_lseek, \
+       .release = single_release \
+}
+
 struct t4_debugfs_entry {
        const char *name;
        const struct file_operations *ops;
@@ -44,9 +59,27 @@ struct t4_debugfs_entry {
        unsigned char data;
 };
 
+struct seq_tab {
+       int (*show)(struct seq_file *seq, void *v, int idx);
+       unsigned int rows;        /* # of entries */
+       unsigned char width;      /* size in bytes of each entry */
+       unsigned char skip_first; /* whether the first line is a header */
+       char data[0];             /* the table data */
+};
+
+static inline unsigned int hex2val(char c)
+{
+       return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
+struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
+                            unsigned int width, unsigned int have_header,
+                            int (*show)(struct seq_file *seq, void *v, int i));
+
 int t4_setup_debugfs(struct adapter *adap);
 void add_debugfs_files(struct adapter *adap,
                       struct t4_debugfs_entry *files,
                       unsigned int nfiles);
+int mem_open(struct inode *inode, struct file *file);
 
 #endif
index ccf3436024bc8ce8ffdc814553469a523db73c92..5db5b4f7b94d7ff29f86ba478cbdc7f75bf2e0e5 100644 (file)
 #include <net/netevent.h>
 #include <net/addrconf.h>
 #include <net/bonding.h>
+#include <net/addrconf.h>
 #include <asm/uaccess.h>
 
 #include "cxgb4.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
+#include "t4fw_version.h"
 #include "cxgb4_dcb.h"
 #include "cxgb4_debugfs.h"
+#include "clip_tbl.h"
 #include "l2t.h"
 
 #ifdef DRV_VERSION
 #define DRV_VERSION "2.0.0-ko"
 #define DRV_DESC "Chelsio T4/T5 Network Driver"
 
-/*
- * Max interrupt hold-off timer value in us.  Queues fall back to this value
- * under extreme memory pressure so it's largish to give the system time to
- * recover.
- */
-#define MAX_SGE_TIMERVAL 200U
-
-enum {
-       /*
-        * Physical Function provisioning constants.
-        */
-       PFRES_NVI = 4,                  /* # of Virtual Interfaces */
-       PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
-       PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr
-                                        */
-       PFRES_NEQ = 256,                /* # of egress queues */
-       PFRES_NIQ = 0,                  /* # of ingress queues */
-       PFRES_TC = 0,                   /* PCI-E traffic class */
-       PFRES_NEXACTF = 128,            /* # of exact MPS filters */
-
-       PFRES_R_CAPS = FW_CMD_CAP_PF,
-       PFRES_WX_CAPS = FW_CMD_CAP_PF,
-
-#ifdef CONFIG_PCI_IOV
-       /*
-        * Virtual Function provisioning constants.  We need two extra Ingress
-        * Queues with Interrupt capability to serve as the VF's Firmware
-        * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
-        * neither will have Free Lists associated with them).  For each
-        * Ethernet/Control Egress Queue and for each Free List, we need an
-        * Egress Context.
-        */
-       VFRES_NPORTS = 1,               /* # of "ports" per VF */
-       VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */
-
-       VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
-       VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
-       VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
-       VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
-       VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
-       VFRES_TC = 0,                   /* PCI-E traffic class */
-       VFRES_NEXACTF = 16,             /* # of exact MPS filters */
-
-       VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
-       VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
-#endif
-};
-
-/*
- * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
- * static and likely not to be useful in the long run.  We really need to
- * implement some form of persistent configuration which the firmware
- * controls.
- */
-static unsigned int pfvfres_pmask(struct adapter *adapter,
-                                 unsigned int pf, unsigned int vf)
-{
-       unsigned int portn, portvec;
-
-       /*
-        * Give PF's access to all of the ports.
-        */
-       if (vf == 0)
-               return FW_PFVF_CMD_PMASK_M;
-
-       /*
-        * For VFs, we'll assign them access to the ports based purely on the
-        * PF.  We assign active ports in order, wrapping around if there are
-        * fewer active ports than PFs: e.g. active port[pf % nports].
-        * Unfortunately the adapter's port_info structs haven't been
-        * initialized yet so we have to compute this.
-        */
-       if (adapter->params.nports == 0)
-               return 0;
-
-       portn = pf % adapter->params.nports;
-       portvec = adapter->params.portvec;
-       for (;;) {
-               /*
-                * Isolate the lowest set bit in the port vector.  If we're at
-                * the port number that we want, return that as the pmask.
-                * otherwise mask that bit out of the port vector and
-                * decrement our port number ...
-                */
-               unsigned int pmask = portvec ^ (portvec & (portvec-1));
-               if (portn == 0)
-                       return pmask;
-               portn--;
-               portvec &= ~pmask;
-       }
-       /*NOTREACHED*/
-}
-
 enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
@@ -263,7 +174,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter")
 static uint force_old_init;
 
 module_param(force_old_init, uint, 0644);
-MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
+MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
+                " parameter");
 
 static int dflt_msg_enable = DFLT_MSG_ENABLE;
 
@@ -292,13 +204,14 @@ static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
 
 module_param_array(intr_holdoff, uint, NULL, 0644);
 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
-                "0..4 in microseconds");
+                "0..4 in microseconds, deprecated parameter");
 
 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
 
 module_param_array(intr_cnt, uint, NULL, 0644);
 MODULE_PARM_DESC(intr_cnt,
-                "thresholds 1..3 for queue interrupt packet counters");
+                "thresholds 1..3 for queue interrupt packet counters, "
+                "deprecated parameter");
 
 /*
  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
@@ -318,7 +231,8 @@ static bool vf_acls;
 
 #ifdef CONFIG_PCI_IOV
 module_param(vf_acls, bool, 0644);
-MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
+                "deprecated parameter");
 
 /* Configure the number of PCI-E Virtual Function which are to be instantiated
  * on SR-IOV Capable Physical Functions.
@@ -340,32 +254,11 @@ module_param(select_queue, int, 0644);
 MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
 
-/*
- * The filter TCAM has a fixed portion and a variable portion.  The fixed
- * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
- * ports.  The variable portion is 36 bits which can include things like Exact
- * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
- * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
- * far exceed the 36-bit budget for this "compressed" header portion of the
- * filter.  Thus, we have a scarce resource which must be carefully managed.
- *
- * By default we set this up to mostly match the set of filter matching
- * capabilities of T3 but with accommodations for some of T4's more
- * interesting features:
- *
- *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
- *     [Inner] VLAN (17), Port (3), FCoE (1) }
- */
-enum {
-       TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
-       TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
-       TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
-};
-
-static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
 
 module_param(tp_vlan_pri_map, uint, 0644);
-MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
+MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
+                "deprecated parameter");
 
 static struct dentry *cxgb4_debugfs_root;
 
@@ -671,7 +564,7 @@ static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
-               ret = GET_TCB_COOKIE(rpl->cookie);
+               ret = TCB_COOKIE_G(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];
 
                if (ret == FW_FILTER_WR_FLT_DELETED) {
@@ -723,7 +616,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
-               unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+               unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
                struct sge_txq *txq;
 
                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
@@ -833,11 +726,11 @@ static void disable_msi(struct adapter *adapter)
 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
 {
        struct adapter *adap = cookie;
+       u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
 
-       u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
-       if (v & PFSW) {
+       if (v & PFSW_F) {
                adap->swintr = 1;
-               t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+               t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
@@ -1030,8 +923,14 @@ static void quiesce_rx(struct adapter *adap)
        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];
 
-               if (q && q->handler)
+               if (q && q->handler) {
                        napi_disable(&q->napi);
+                       local_bh_disable();
+                       while (!cxgb_poll_lock_napi(q))
+                               mdelay(1);
+                       local_bh_enable();
+               }
+
        }
 }
 
@@ -1047,12 +946,14 @@ static void enable_rx(struct adapter *adap)
 
                if (!q)
                        continue;
-               if (q->handler)
+               if (q->handler) {
+                       cxgb_busy_poll_init_lock(q);
                        napi_enable(&q->napi);
+               }
                /* 0-increment GTS to start the timer and enable interrupts */
-               t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
-                            SEINTARM(q->intr_params) |
-                            INGRESSQID(q->cntxt_id));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+                            SEINTARM_V(q->intr_params) |
+                            INGRESSQID_V(q->cntxt_id));
        }
 }
 
@@ -1176,10 +1077,10 @@ freeout:        t4_free_sge_resources(adap);
        }
 
        t4_write_reg(adap, is_t4(adap->params.chip) ?
-                               MPS_TRC_RSS_CONTROL :
-                               MPS_T5_TRC_RSS_CONTROL,
-                    RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
-                    QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+                               MPS_TRC_RSS_CONTROL_A :
+                               MPS_T5_TRC_RSS_CONTROL_A,
+                    RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
+                    QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
        return 0;
 }
 
@@ -1589,9 +1490,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
        if (!is_t4(adapter->params.chip)) {
-               t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
-               val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
-               val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
+               t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
+               val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
+               val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
                *data = val1 - val2;
                data++;
                *data = val2;
@@ -2608,8 +2509,8 @@ static int closest_thres(const struct sge *s, int thres)
 /*
  * Return a queue's interrupt hold-off time in us.  0 means no timer.
  */
-static unsigned int qtimer_val(const struct adapter *adap,
-                              const struct sge_rspq *q)
+unsigned int qtimer_val(const struct adapter *adap,
+                       const struct sge_rspq *q)
 {
        unsigned int idx = q->intr_params >> 1;
 
@@ -3346,40 +3247,6 @@ static int tid_init(struct tid_info *t)
        return 0;
 }
 
-int cxgb4_clip_get(const struct net_device *dev,
-                  const struct in6_addr *lip)
-{
-       struct adapter *adap;
-       struct fw_clip_cmd c;
-
-       adap = netdev2adap(dev);
-       memset(&c, 0, sizeof(c));
-       c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
-                       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
-       c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
-       c.ip_hi = *(__be64 *)(lip->s6_addr);
-       c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
-       return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
-}
-EXPORT_SYMBOL(cxgb4_clip_get);
-
-int cxgb4_clip_release(const struct net_device *dev,
-                      const struct in6_addr *lip)
-{
-       struct adapter *adap;
-       struct fw_clip_cmd c;
-
-       adap = netdev2adap(dev);
-       memset(&c, 0, sizeof(c));
-       c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
-                       FW_CMD_REQUEST_F | FW_CMD_READ_F);
-       c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
-       c.ip_hi = *(__be64 *)(lip->s6_addr);
-       c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
-       return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
-}
-EXPORT_SYMBOL(cxgb4_clip_release);
-
 /**
  *     cxgb4_create_server - create an IP server
  *     @dev: the device
@@ -3415,8 +3282,8 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
        req->peer_ip = htonl(0);
        chan = rxq_to_chan(&adap->sge, queue);
        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
-       req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-                               SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+       req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+                               SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
 }
@@ -3458,8 +3325,8 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
        req->peer_ip_lo = cpu_to_be64(0);
        chan = rxq_to_chan(&adap->sge, queue);
        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
-       req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-                               SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+       req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+                               SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
 }
@@ -3482,8 +3349,8 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
        req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
-       req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
-                               LISTSVR_IPV6(0)) | QUEUENO(queue));
+       req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
+                               LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
        ret = t4_mgmt_tx(adap, skb);
        return net_xmit_eval(ret);
 }
@@ -3600,14 +3467,14 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
        struct adapter *adap = netdev2adap(dev);
        u32 v1, v2, lp_count, hp_count;
 
-       v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
-       v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+       v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+       v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
        if (is_t4(adap->params.chip)) {
-               lp_count = G_LP_COUNT(v1);
-               hp_count = G_HP_COUNT(v1);
+               lp_count = LP_COUNT_G(v1);
+               hp_count = HP_COUNT_G(v1);
        } else {
-               lp_count = G_LP_COUNT_T5(v1);
-               hp_count = G_HP_COUNT_T5(v2);
+               lp_count = LP_COUNT_T5_G(v1);
+               hp_count = HP_COUNT_T5_G(v2);
        }
        return lpfifo ? lp_count : hp_count;
 }
@@ -3653,10 +3520,10 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
 {
        struct adapter *adap = netdev2adap(dev);
 
-       t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
-       t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
-                    HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
-                    HPZ3(pgsz_order[3]));
+       t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
+       t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
+                    HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
+                    HPZ3_V(pgsz_order[3]));
 }
 EXPORT_SYMBOL(cxgb4_iscsi_init);
 
@@ -3666,14 +3533,14 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
        int ret;
 
        ret = t4_fwaddrspace_write(adap, adap->mbox,
-                                  0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
+                                  0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
        return ret;
 }
 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
 
 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
 {
-       u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
+       u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
        __be64 indices;
        int ret;
 
@@ -3702,14 +3569,20 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
 
        if (pidx != hw_pidx) {
                u16 delta;
+               u32 val;
 
                if (pidx >= hw_pidx)
                        delta = pidx - hw_pidx;
                else
                        delta = size - hw_pidx + pidx;
+
+               if (is_t4(adap->params.chip))
+                       val = PIDX_V(delta);
+               else
+                       val = PIDX_T5_V(delta);
                wmb();
-               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                            QID(qid) | PIDX(delta));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                            QID_V(qid) | val);
        }
 out:
        return ret;
@@ -3721,8 +3594,8 @@ void cxgb4_disable_db_coalescing(struct net_device *dev)
        struct adapter *adap;
 
        adap = netdev2adap(dev);
-       t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
-                        F_NOCOALESCE);
+       t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
+                        NOCOALESCE_F);
 }
 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
 
@@ -3731,7 +3604,7 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
        struct adapter *adap;
 
        adap = netdev2adap(dev);
-       t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+       t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
 }
 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
 
@@ -3809,8 +3682,8 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
        struct adapter *adap;
 
        adap = netdev2adap(dev);
-       lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
-       hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
+       lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
+       hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
 
        return ((u64)hi << 32) | (u64)lo;
 }
@@ -3870,14 +3743,14 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
        u32 v1, v2, lp_count, hp_count;
 
        do {
-               v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
-               v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+               v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
+               v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
                if (is_t4(adap->params.chip)) {
-                       lp_count = G_LP_COUNT(v1);
-                       hp_count = G_HP_COUNT(v1);
+                       lp_count = LP_COUNT_G(v1);
+                       hp_count = HP_COUNT_G(v1);
                } else {
-                       lp_count = G_LP_COUNT_T5(v1);
-                       hp_count = G_HP_COUNT_T5(v2);
+                       lp_count = LP_COUNT_T5_G(v1);
+                       hp_count = HP_COUNT_T5_G(v2);
                }
 
                if (lp_count == 0 && hp_count == 0)
@@ -3904,8 +3777,8 @@ static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
                 * are committed before we tell HW about them.
                 */
                wmb();
-               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                            QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                            QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
                q->db_pidx_inc = 0;
        }
        q->db_disabled = 0;
@@ -3952,9 +3825,9 @@ static void process_db_full(struct work_struct *work)
        drain_db_fifo(adap, dbfifo_drain_delay);
        enable_dbs(adap);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-       t4_set_reg_field(adap, SGE_INT_ENABLE3,
-                        DBFIFO_HP_INT | DBFIFO_LP_INT,
-                        DBFIFO_HP_INT | DBFIFO_LP_INT);
+       t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+                        DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+                        DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
 }
 
 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3968,14 +3841,20 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
                goto out;
        if (q->db_pidx != hw_pidx) {
                u16 delta;
+               u32 val;
 
                if (q->db_pidx >= hw_pidx)
                        delta = q->db_pidx - hw_pidx;
                else
                        delta = q->size - hw_pidx + q->db_pidx;
+
+               if (is_t4(adap->params.chip))
+                       val = PIDX_V(delta);
+               else
+                       val = PIDX_T5_V(delta);
                wmb();
-               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                            QID(q->cntxt_id) | PIDX(delta));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                            QID_V(q->cntxt_id) | val);
        }
 out:
        q->db_disabled = 0;
@@ -4024,14 +3903,14 @@ static void process_db_drop(struct work_struct *work)
                        dev_err(adap->pdev_dev, "doorbell drop recovery: "
                                "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
                else
-                       writel(PIDX_T5(pidx_inc) | QID(bar2_qid),
+                       writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
                               adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
 
                /* Re-enable BAR2 WC */
                t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
        }
 
-       t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
+       t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
 }
 
 void t4_db_full(struct adapter *adap)
@@ -4039,8 +3918,8 @@ void t4_db_full(struct adapter *adap)
        if (is_t4(adap->params.chip)) {
                disable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
-               t4_set_reg_field(adap, SGE_INT_ENABLE3,
-                                DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
+               t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+                                DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
                queue_work(adap->workq, &adap->db_full_task);
        }
 }
@@ -4081,7 +3960,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.nports = adap->params.nports;
        lli.wr_cred = adap->params.ofldq_wr_cred;
        lli.adapter_type = adap->params.chip;
-       lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+       lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
        lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lli.udb_density = 1 << adap->params.sge.eq_qpp;
        lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4089,8 +3968,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
                lli.tx_modq[i] = i;
-       lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
-       lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+       lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
+       lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
        lli.fw_vers = adap->params.fw_vers;
        lli.dbfifo_int_thresh = dbfifo_int_thresh;
        lli.sge_ingpadboundary = adap->sge.fl_align;
@@ -4220,148 +4099,61 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
 }
 EXPORT_SYMBOL(cxgb4_unregister_uld);
 
-/* Check if netdev on which event is occured belongs to us or not. Return
- * success (true) if it belongs otherwise failure (false).
- * Called with rcu_read_lock() held.
- */
 #if IS_ENABLED(CONFIG_IPV6)
-static bool cxgb4_netdev(const struct net_device *netdev)
+static int cxgb4_inet6addr_handler(struct notifier_block *this,
+                                  unsigned long event, void *data)
 {
+       struct inet6_ifaddr *ifa = data;
+       struct net_device *event_dev = ifa->idev->dev;
+       const struct device *parent = NULL;
+#if IS_ENABLED(CONFIG_BONDING)
        struct adapter *adap;
-       int i;
-
-       list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
-               for (i = 0; i < MAX_NPORTS; i++)
-                       if (adap->port[i] == netdev)
-                               return true;
-       return false;
-}
+#endif
+       if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+               event_dev = vlan_dev_real_dev(event_dev);
+#if IS_ENABLED(CONFIG_BONDING)
+       if (event_dev->flags & IFF_MASTER) {
+               list_for_each_entry(adap, &adapter_list, list_node) {
+                       switch (event) {
+                       case NETDEV_UP:
+                               cxgb4_clip_get(adap->port[0],
+                                              (const u32 *)ifa, 1);
+                               break;
+                       case NETDEV_DOWN:
+                               cxgb4_clip_release(adap->port[0],
+                                                  (const u32 *)ifa, 1);
+                               break;
+                       default:
+                               break;
+                       }
+               }
+               return NOTIFY_OK;
+       }
+#endif
 
-static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
-                   unsigned long event)
-{
-       int ret = NOTIFY_DONE;
+       if (event_dev)
+               parent = event_dev->dev.parent;
 
-       rcu_read_lock();
-       if (cxgb4_netdev(event_dev)) {
+       if (parent && parent->driver == &cxgb4_driver.driver) {
                switch (event) {
                case NETDEV_UP:
-                       ret = cxgb4_clip_get(event_dev, &ifa->addr);
-                       if (ret < 0) {
-                               rcu_read_unlock();
-                               return ret;
-                       }
-                       ret = NOTIFY_OK;
+                       cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
                        break;
                case NETDEV_DOWN:
-                       cxgb4_clip_release(event_dev, &ifa->addr);
-                       ret = NOTIFY_OK;
+                       cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
                        break;
                default:
                        break;
                }
        }
-       rcu_read_unlock();
-       return ret;
-}
-
-static int cxgb4_inet6addr_handler(struct notifier_block *this,
-               unsigned long event, void *data)
-{
-       struct inet6_ifaddr *ifa = data;
-       struct net_device *event_dev;
-       int ret = NOTIFY_DONE;
-       struct bonding *bond = netdev_priv(ifa->idev->dev);
-       struct list_head *iter;
-       struct slave *slave;
-       struct pci_dev *first_pdev = NULL;
-
-       if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
-               event_dev = vlan_dev_real_dev(ifa->idev->dev);
-               ret = clip_add(event_dev, ifa, event);
-       } else if (ifa->idev->dev->flags & IFF_MASTER) {
-               /* It is possible that two different adapters are bonded in one
-                * bond. We need to find such different adapters and add clip
-                * in all of them only once.
-                */
-               bond_for_each_slave(bond, slave, iter) {
-                       if (!first_pdev) {
-                               ret = clip_add(slave->dev, ifa, event);
-                               /* If clip_add is success then only initialize
-                                * first_pdev since it means it is our device
-                                */
-                               if (ret == NOTIFY_OK)
-                                       first_pdev = to_pci_dev(
-                                                       slave->dev->dev.parent);
-                       } else if (first_pdev !=
-                                  to_pci_dev(slave->dev->dev.parent))
-                                       ret = clip_add(slave->dev, ifa, event);
-               }
-       } else
-               ret = clip_add(ifa->idev->dev, ifa, event);
-
-       return ret;
+       return NOTIFY_OK;
 }
 
+static bool inet6addr_registered;
 static struct notifier_block cxgb4_inet6addr_notifier = {
        .notifier_call = cxgb4_inet6addr_handler
 };
 
-/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
- * a physical device.
- * The physical device reference is needed to send the actul CLIP command.
- */
-static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
-{
-       struct inet6_dev *idev = NULL;
-       struct inet6_ifaddr *ifa;
-       int ret = 0;
-
-       idev = __in6_dev_get(root_dev);
-       if (!idev)
-               return ret;
-
-       read_lock_bh(&idev->lock);
-       list_for_each_entry(ifa, &idev->addr_list, if_list) {
-               ret = cxgb4_clip_get(dev, &ifa->addr);
-               if (ret < 0)
-                       break;
-       }
-       read_unlock_bh(&idev->lock);
-
-       return ret;
-}
-
-static int update_root_dev_clip(struct net_device *dev)
-{
-       struct net_device *root_dev = NULL;
-       int i, ret = 0;
-
-       /* First populate the real net device's IPv6 addresses */
-       ret = update_dev_clip(dev, dev);
-       if (ret)
-               return ret;
-
-       /* Parse all bond and vlan devices layered on top of the physical dev */
-       root_dev = netdev_master_upper_dev_get_rcu(dev);
-       if (root_dev) {
-               ret = update_dev_clip(root_dev, dev);
-               if (ret)
-                       return ret;
-       }
-
-       for (i = 0; i < VLAN_N_VID; i++) {
-               root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
-               if (!root_dev)
-                       continue;
-
-               ret = update_dev_clip(root_dev, dev);
-               if (ret)
-                       break;
-       }
-       return ret;
-}
-
 static void update_clip(const struct adapter *adap)
 {
        int i;
@@ -4375,7 +4167,7 @@ static void update_clip(const struct adapter *adap)
                ret = 0;
 
                if (dev)
-                       ret = update_root_dev_clip(dev);
+                       ret = cxgb4_update_root_dev_clip(dev);
 
                if (ret < 0)
                        break;
@@ -4567,13 +4359,13 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                        f->fs.val.lip[i] = val[i];
                        f->fs.mask.lip[i] = ~0;
                }
-               if (adap->params.tp.vlan_pri_map & F_PORT) {
+               if (adap->params.tp.vlan_pri_map & PORT_F) {
                        f->fs.val.iport = port;
                        f->fs.mask.iport = mask;
                }
        }
 
-       if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+       if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
                f->fs.val.proto = IPPROTO_TCP;
                f->fs.mask.proto = ~0;
        }
@@ -4779,11 +4571,15 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller  = cxgb_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll        = cxgb_busy_poll,
+#endif
+
 };
 
 void t4_fatal_err(struct adapter *adap)
 {
-       t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+       t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
        t4_intr_disable(adap);
        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
 }
@@ -4858,16 +4654,16 @@ static void setup_memwin(struct adapter *adap)
                mem_win2_base = MEMWIN2_BASE_T5;
                mem_win2_aperture = MEMWIN2_APERTURE_T5;
        }
-       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
-                    mem_win0_base | BIR(0) |
-                    WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
-       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
-                    mem_win1_base | BIR(0) |
-                    WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
-       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
-                    mem_win2_base | BIR(0) |
-                    WINDOW(ilog2(mem_win2_aperture) - 10));
-       t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
+                    mem_win0_base | BIR_V(0) |
+                    WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
+       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
+                    mem_win1_base | BIR_V(0) |
+                    WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
+       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
+                    mem_win2_base | BIR_V(0) |
+                    WINDOW_V(ilog2(mem_win2_aperture) - 10));
+       t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
 }
 
 static void setup_memwin_rdma(struct adapter *adap)
@@ -4881,13 +4677,13 @@ static void setup_memwin_rdma(struct adapter *adap)
                start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
                sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
                t4_write_reg(adap,
-                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
-                            start | BIR(1) | WINDOW(ilog2(sz_kb)));
+                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
+                            start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
                t4_write_reg(adap,
-                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
+                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
                             adap->vres.ocq.start);
                t4_read_reg(adap,
-                           PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
+                           PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
        }
 }
 
@@ -4936,38 +4732,38 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        t4_sge_init(adap);
 
        /* tweak some settings */
-       t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
-       t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
-       t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
-       v = t4_read_reg(adap, TP_PIO_DATA);
-       t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+       t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
+       t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
+       t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
+       v = t4_read_reg(adap, TP_PIO_DATA_A);
+       t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
 
        /* first 4 Tx modulation queues point to consecutive Tx channels */
        adap->params.tp.tx_modq_map = 0xE4;
-       t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
-                    V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
+       t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
+                    TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
 
        /* associate each Tx modulation queue with consecutive Tx channels */
        v = 0x84218421;
-       t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-                         &v, 1, A_TP_TX_SCHED_HDR);
-       t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-                         &v, 1, A_TP_TX_SCHED_FIFO);
-       t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
-                         &v, 1, A_TP_TX_SCHED_PCMD);
+       t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                         &v, 1, TP_TX_SCHED_HDR_A);
+       t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                         &v, 1, TP_TX_SCHED_FIFO_A);
+       t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                         &v, 1, TP_TX_SCHED_PCMD_A);
 
 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
        if (is_offload(adap)) {
-               t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
-                            V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
-               t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
-                            V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
-                            V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+               t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
+                            TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+               t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
+                            TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+                            TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
        }
 
        /* get basic stuff going */
@@ -5013,16 +4809,16 @@ static int adap_init0_tweaks(struct adapter *adapter)
                        rx_dma_offset);
                rx_dma_offset = 2;
        }
-       t4_set_reg_field(adapter, SGE_CONTROL,
-                        PKTSHIFT_MASK,
-                        PKTSHIFT(rx_dma_offset));
+       t4_set_reg_field(adapter, SGE_CONTROL_A,
+                        PKTSHIFT_V(PKTSHIFT_M),
+                        PKTSHIFT_V(rx_dma_offset));
 
        /*
         * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
         * adds the pseudo header itself.
         */
-       t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
-                              CSUM_HAS_PSEUDO_HDR, 0);
+       t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
+                              CSUM_HAS_PSEUDO_HDR_F, 0);
 
        return 0;
 }
@@ -5046,7 +4842,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
         */
        if (reset) {
                ret = t4_fw_reset(adapter, adapter->mbox,
-                                 PIORSTMODE | PIORST);
+                                 PIORSTMODE_F | PIORST_F);
                if (ret < 0)
                        goto bye;
        }
@@ -5212,12 +5008,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
        if (ret < 0)
                goto bye;
 
-       /*
-        * Return successfully and note that we're operating with parameters
-        * not supplied by the driver, rather than from hard-wired
-        * initialization constants burried in the driver.
+       /* Emit Firmware Configuration File information and return
+        * successfully.
         */
-       adapter->flags |= USING_SOFT_PARAMS;
        dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
                 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
                 config_name, finiver, cfcsum);
@@ -5235,249 +5028,6 @@ bye:
        return ret;
 }
 
-/*
- * Attempt to initialize the adapter via hard-coded, driver supplied
- * parameters ...
- */
-static int adap_init0_no_config(struct adapter *adapter, int reset)
-{
-       struct sge *s = &adapter->sge;
-       struct fw_caps_config_cmd caps_cmd;
-       u32 v;
-       int i, ret;
-
-       /*
-        * Reset device if necessary
-        */
-       if (reset) {
-               ret = t4_fw_reset(adapter, adapter->mbox,
-                                 PIORSTMODE | PIORST);
-               if (ret < 0)
-                       goto bye;
-       }
-
-       /*
-        * Get device capabilities and select which we'll be using.
-        */
-       memset(&caps_cmd, 0, sizeof(caps_cmd));
-       caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
-                                    FW_CMD_REQUEST_F | FW_CMD_READ_F);
-       caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
-       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
-                        &caps_cmd);
-       if (ret < 0)
-               goto bye;
-
-       if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
-               if (!vf_acls)
-                       caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
-               else
-                       caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
-       } else if (vf_acls) {
-               dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
-               goto bye;
-       }
-       caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
-                             FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
-       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
-                        NULL);
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Tweak configuration based on system architecture, module
-        * parameters, etc.
-        */
-       ret = adap_init0_tweaks(adapter);
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Select RSS Global Mode we want to use.  We use "Basic Virtual"
-        * mode which maps each Virtual Interface to its own section of
-        * the RSS Table and we turn on all map and hash enables ...
-        */
-       adapter->flags |= RSS_TNLALLLOOKUP;
-       ret = t4_config_glbl_rss(adapter, adapter->mbox,
-                                FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
-                                FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
-                                FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
-                                ((adapter->flags & RSS_TNLALLLOOKUP) ?
-                                       FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Set up our own fundamental resource provisioning ...
-        */
-       ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
-                         PFRES_NEQ, PFRES_NETHCTRL,
-                         PFRES_NIQFLINT, PFRES_NIQ,
-                         PFRES_TC, PFRES_NVI,
-                         FW_PFVF_CMD_CMASK_M,
-                         pfvfres_pmask(adapter, adapter->fn, 0),
-                         PFRES_NEXACTF,
-                         PFRES_R_CAPS, PFRES_WX_CAPS);
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Perform low level SGE initialization.  We need to do this before we
-        * send the firmware the INITIALIZE command because that will cause
-        * any other PF Drivers which are waiting for the Master
-        * Initialization to proceed forward.
-        */
-       for (i = 0; i < SGE_NTIMERS - 1; i++)
-               s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
-       s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
-       s->counter_val[0] = 1;
-       for (i = 1; i < SGE_NCOUNTERS; i++)
-               s->counter_val[i] = min(intr_cnt[i - 1],
-                                       THRESHOLD_0_GET(THRESHOLD_0_MASK));
-       t4_sge_init(adapter);
-
-#ifdef CONFIG_PCI_IOV
-       /*
-        * Provision resource limits for Virtual Functions.  We currently
-        * grant them all the same static resource limits except for the Port
-        * Access Rights Mask which we're assigning based on the PF.  All of
-        * the static provisioning stuff for both the PF and VF really needs
-        * to be managed in a persistent manner for each device which the
-        * firmware controls.
-        */
-       {
-               int pf, vf;
-
-               for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
-                       if (num_vf[pf] <= 0)
-                               continue;
-
-                       /* VF numbering starts at 1! */
-                       for (vf = 1; vf <= num_vf[pf]; vf++) {
-                               ret = t4_cfg_pfvf(adapter, adapter->mbox,
-                                                 pf, vf,
-                                                 VFRES_NEQ, VFRES_NETHCTRL,
-                                                 VFRES_NIQFLINT, VFRES_NIQ,
-                                                 VFRES_TC, VFRES_NVI,
-                                                 FW_PFVF_CMD_CMASK_M,
-                                                 pfvfres_pmask(
-                                                 adapter, pf, vf),
-                                                 VFRES_NEXACTF,
-                                                 VFRES_R_CAPS, VFRES_WX_CAPS);
-                               if (ret < 0)
-                                       dev_warn(adapter->pdev_dev,
-                                                "failed to "\
-                                                "provision pf/vf=%d/%d; "
-                                                "err=%d\n", pf, vf, ret);
-                       }
-               }
-       }
-#endif
-
-       /*
-        * Set up the default filter mode.  Later we'll want to implement this
-        * via a firmware command, etc. ...  This needs to be done before the
-        * firmare initialization command ...  If the selected set of fields
-        * isn't equal to the default value, we'll need to make sure that the
-        * field selections will fit in the 36-bit budget.
-        */
-       if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
-               int j, bits = 0;
-
-               for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
-                       switch (tp_vlan_pri_map & (1 << j)) {
-                       case 0:
-                               /* compressed filter field not enabled */
-                               break;
-                       case FCOE_MASK:
-                               bits +=  1;
-                               break;
-                       case PORT_MASK:
-                               bits +=  3;
-                               break;
-                       case VNIC_ID_MASK:
-                               bits += 17;
-                               break;
-                       case VLAN_MASK:
-                               bits += 17;
-                               break;
-                       case TOS_MASK:
-                               bits +=  8;
-                               break;
-                       case PROTOCOL_MASK:
-                               bits +=  8;
-                               break;
-                       case ETHERTYPE_MASK:
-                               bits += 16;
-                               break;
-                       case MACMATCH_MASK:
-                               bits +=  9;
-                               break;
-                       case MPSHITTYPE_MASK:
-                               bits +=  3;
-                               break;
-                       case FRAGMENTATION_MASK:
-                               bits +=  1;
-                               break;
-                       }
-
-               if (bits > 36) {
-                       dev_err(adapter->pdev_dev,
-                               "tp_vlan_pri_map=%#x needs %d bits > 36;"\
-                               " using %#x\n", tp_vlan_pri_map, bits,
-                               TP_VLAN_PRI_MAP_DEFAULT);
-                       tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
-               }
-       }
-       v = tp_vlan_pri_map;
-       t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
-                         &v, 1, TP_VLAN_PRI_MAP);
-
-       /*
-        * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
-        * to support any of the compressed filter fields above.  Newer
-        * versions of the firmware do this automatically but it doesn't hurt
-        * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
-        * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
-        * since the firmware automatically turns this on and off when we have
-        * a non-zero number of filters active (since it does have a
-        * performance impact).
-        */
-       if (tp_vlan_pri_map)
-               t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
-                                FIVETUPLELOOKUP_MASK,
-                                FIVETUPLELOOKUP_MASK);
-
-       /*
-        * Tweak some settings.
-        */
-       t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
-                    RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
-                    PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
-                    KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
-
-       /*
-        * Get basic stuff going by issuing the Firmware Initialize command.
-        * Note that this _must_ be after all PFVF commands ...
-        */
-       ret = t4_fw_initialize(adapter, adapter->mbox);
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Return successfully!
-        */
-       dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
-                "driver parameters\n");
-       return 0;
-
-       /*
-        * Something bad happened.  Return the error ...
-        */
-bye:
-       return ret;
-}
-
 static struct fw_info fw_info_array[] = {
        {
                .chip = CHELSIO_T4,
@@ -5529,6 +5079,8 @@ static int adap_init0(struct adapter *adap)
        enum dev_state state;
        u32 params[7], val[7];
        struct fw_caps_config_cmd caps_cmd;
+       struct fw_devlog_cmd devlog_cmd;
+       u32 devlog_meminfo;
        int reset = 1;
 
        /* Contact FW, advertising Master capability */
@@ -5590,8 +5142,7 @@ static int adap_init0(struct adapter *adap)
                                 state, &reset);
 
                /* Cleaning up */
-               if (fw != NULL)
-                       release_firmware(fw);
+               release_firmware(fw);
                t4_free_mem(card_fw);
 
                if (ret < 0)
@@ -5609,6 +5160,30 @@ static int adap_init0(struct adapter *adap)
        if (ret < 0)
                goto bye;
 
+       /* Read firmware device log parameters.  We really need to find a way
+        * to get these parameters initialized with some default values (which
+        * are likely to be correct) for the case where we either don't
+        * attache to the firmware or it's crashed when we probe the adapter.
+        * That way we'll still be able to perform early firmware startup
+        * debugging ...  If the request to get the Firmware's Device Log
+        * parameters fails, we'll live so we don't make that a fatal error.
+        */
+       memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+       devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
+       devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+       ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+                        &devlog_cmd);
+       if (ret == 0) {
+               devlog_meminfo =
+                       ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+               adap->params.devlog.memtype =
+                       FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+               adap->params.devlog.start =
+                       FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+               adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
+       }
+
        /*
         * Find out what ports are available to us.  Note that we need to do
         * this before calling adap_init0_no_config() since it needs nports
@@ -5624,88 +5199,58 @@ static int adap_init0(struct adapter *adap)
        adap->params.nports = hweight32(port_vec);
        adap->params.portvec = port_vec;
 
-       /*
-        * If the firmware is initialized already (and we're not forcing a
-        * master initialization), note that we're living with existing
-        * adapter parameters.  Otherwise, it's time to try initializing the
-        * adapter ...
+       /* If the firmware is initialized already, emit a simply note to that
+        * effect. Otherwise, it's time to try initializing the adapter.
         */
        if (state == DEV_STATE_INIT) {
                dev_info(adap->pdev_dev, "Coming up as %s: "\
                         "Adapter already initialized\n",
                         adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
-               adap->flags |= USING_SOFT_PARAMS;
        } else {
                dev_info(adap->pdev_dev, "Coming up as MASTER: "\
                         "Initializing adapter\n");
-               /*
-                * If the firmware doesn't support Configuration
-                * Files warn user and exit,
+
+               /* Find out whether we're dealing with a version of the
+                * firmware which has configuration file support.
                 */
-               if (ret < 0)
-                       dev_warn(adap->pdev_dev, "Firmware doesn't support "
-                                "configuration file.\n");
-               if (force_old_init)
-                       ret = adap_init0_no_config(adap, reset);
-               else {
-                       /*
-                        * Find out whether we're dealing with a version of
-                        * the firmware which has configuration file support.
-                        */
-                       params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
-                                    FW_PARAMS_PARAM_X_V(
-                                            FW_PARAMS_PARAM_DEV_CF));
-                       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
-                                             params, val);
-
-                       /*
-                        * If the firmware doesn't support Configuration
-                        * Files, use the old Driver-based, hard-wired
-                        * initialization.  Otherwise, try using the
-                        * Configuration File support and fall back to the
-                        * Driver-based initialization if there's no
-                        * Configuration File found.
-                        */
-                       if (ret < 0)
-                               ret = adap_init0_no_config(adap, reset);
-                       else {
-                               /*
-                                * The firmware provides us with a memory
-                                * buffer where we can load a Configuration
-                                * File from the host if we want to override
-                                * the Configuration File in flash.
-                                */
+               params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+                            FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+                                     params, val);
 
-                               ret = adap_init0_config(adap, reset);
-                               if (ret == -ENOENT) {
-                                       dev_info(adap->pdev_dev,
-                                           "No Configuration File present "
-                                           "on adapter. Using hard-wired "
-                                           "configuration parameters.\n");
-                                       ret = adap_init0_no_config(adap, reset);
-                               }
-                       }
+               /* If the firmware doesn't support Configuration Files,
+                * return an error.
+                */
+               if (ret < 0) {
+                       dev_err(adap->pdev_dev, "firmware doesn't support "
+                               "Firmware Configuration Files\n");
+                       goto bye;
+               }
+
+               /* The firmware provides us with a memory buffer where we can
+                * load a Configuration File from the host if we want to
+                * override the Configuration File in flash.
+                */
+               ret = adap_init0_config(adap, reset);
+               if (ret == -ENOENT) {
+                       dev_err(adap->pdev_dev, "no Configuration File "
+                               "present on adapter.\n");
+                       goto bye;
                }
                if (ret < 0) {
-                       dev_err(adap->pdev_dev,
-                               "could not initialize adapter, error %d\n",
-                               -ret);
+                       dev_err(adap->pdev_dev, "could not initialize "
+                               "adapter, error %d\n", -ret);
                        goto bye;
                }
        }
 
-       /*
-        * If we're living with non-hard-coded parameters (either from a
-        * Firmware Configuration File or values programmed by a different PF
-        * Driver), give the SGE code a chance to pull in anything that it
-        * needs ...  Note that this must be called after we retrieve our VPD
-        * parameters in order to know how to convert core ticks to seconds.
+       /* Give the SGE code a chance to pull in anything that it needs ...
+        * Note that this must be called after we retrieve our VPD parameters
+        * in order to know how to convert core ticks to seconds, etc.
         */
-       if (adap->flags & USING_SOFT_PARAMS) {
-               ret = t4_sge_init(adap);
-               if (ret < 0)
-                       goto bye;
-       }
+       ret = t4_sge_init(adap);
+       if (ret < 0)
+               goto bye;
 
        if (is_bypass_device(adap->pdev->device))
                adap->params.bypass = 1;
@@ -5739,6 +5284,14 @@ static int adap_init0(struct adapter *adap)
        adap->tids.nftids = val[4] - val[3] + 1;
        adap->sge.ingr_start = val[5];
 
+       params[0] = FW_PARAM_PFVF(CLIP_START);
+       params[1] = FW_PARAM_PFVF(CLIP_END);
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+       if (ret < 0)
+               goto bye;
+       adap->clipt_start = val[0];
+       adap->clipt_end = val[1];
+
        /* query params related to active filter region */
        params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
        params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
@@ -6401,7 +5954,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_unmap_bar0;
 
        /* We control everything through one PF */
-       func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+       func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
        if (func != ent->driver_data) {
                iounmap(regs);
                pci_disable_device(pdev);
@@ -6467,9 +6020,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 
        if (!is_t4(adapter->params.chip)) {
-               s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
-               qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
-                     SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+               s_qpp = (QUEUESPERPAGEPF0_S +
+                       (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
+                       adapter->fn);
+               qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
+                     SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
                num_seg = PAGE_SIZE / SEGMENT_SIZE;
 
                /* Each segment size is 128B. Write coalescing is enabled only
@@ -6557,6 +6112,18 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                adapter->params.offload = 0;
        }
 
+#if IS_ENABLED(CONFIG_IPV6)
+       adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+                                         adapter->clipt_end);
+       if (!adapter->clipt) {
+               /* We tolerate a lack of clip_table, giving up
+                * some functionality
+                */
+               dev_warn(&pdev->dev,
+                        "could not allocate Clip table, continuing\n");
+               adapter->params.offload = 0;
+       }
+#endif
        if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
                dev_warn(&pdev->dev, "could not allocate TID table, "
                         "continuing\n");
@@ -6682,6 +6249,9 @@ static void remove_one(struct pci_dev *pdev)
                        cxgb_down(adapter);
 
                free_some_resources(adapter);
+#if IS_ENABLED(CONFIG_IPV6)
+               t4_cleanup_clip_tbl(adapter);
+#endif
                iounmap(adapter->regs);
                if (!is_t4(adapter->params.chip))
                        iounmap(adapter->bar2);
@@ -6720,7 +6290,10 @@ static int __init cxgb4_init_module(void)
                debugfs_remove(cxgb4_debugfs_root);
 
 #if IS_ENABLED(CONFIG_IPV6)
-       register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+       if (!inet6addr_registered) {
+               register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+               inet6addr_registered = true;
+       }
 #endif
 
        return ret;
@@ -6729,7 +6302,10 @@ static int __init cxgb4_init_module(void)
 static void __exit cxgb4_cleanup_module(void)
 {
 #if IS_ENABLED(CONFIG_IPV6)
-       unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+       if (inet6addr_registered) {
+               unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+               inet6addr_registered = false;
+       }
 #endif
        pci_unregister_driver(&cxgb4_driver);
        debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
index 152b4c4c7809599a0a38112b7b55ece4b7816e3e..78ab4d406ce277509eb6da61780903129596e7ee 100644 (file)
@@ -173,9 +173,6 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                               unsigned char port, unsigned char mask);
 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
                               unsigned int queue, bool ipv6);
-int cxgb4_clip_get(const struct net_device *dev, const struct in6_addr *lip);
-int cxgb4_clip_release(const struct net_device *dev,
-                      const struct in6_addr *lip);
 
 static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
 {
index a047baa9fd0421289e6976b926bfbc554b38edc3..252efc29321f4e7c57a53e631b520ea8820f6900 100644 (file)
@@ -46,6 +46,7 @@
 #include "t4_msg.h"
 #include "t4fw_api.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 
 #define VLAN_NONE 0xfff
 
@@ -150,8 +151,8 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
                                        e->idx | (sync ? F_SYNC_WR : 0) |
-                                       TID_QID(adap->sge.fw_evtq.abs_id)));
-       req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
+                                       TID_QID_V(adap->sge.fw_evtq.abs_id)));
+       req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
        req->l2t_idx = htons(e->idx);
        req->vlan = htons(e->vlan);
        if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
@@ -425,7 +426,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
         * in the Compressed Filter Tuple.
         */
        if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
-               ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+               ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;
 
        if (tp->port_shift >= 0)
                ntuple |= (u64)l2t->lport << tp->port_shift;
@@ -439,9 +440,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
                u32 pf = FW_VIID_PFN_G(viid);
                u32 vld = FW_VIID_VIVLD_G(viid);
 
-               ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
-                               V_FT_VNID_ID_PF(pf) |
-                               V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+               ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
+                               FT_VNID_ID_PF_V(pf) |
+                               FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
        }
 
        return ntuple;
index ebf935a1e352cecd43746aa1c2567a121a404b8e..b4b9f6048fe730dc1287a648a5e61c7f2d92dd7d 100644 (file)
 #include <linux/export.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 #include "cxgb4.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
 
@@ -521,10 +525,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
 {
        u32 val;
        if (q->pend_cred >= 8) {
-               val = PIDX(q->pend_cred / 8);
-               if (!is_t4(adap->params.chip))
-                       val |= DBTYPE(1);
-               val |= DBPRIO(1);
+               if (is_t4(adap->params.chip))
+                       val = PIDX_V(q->pend_cred / 8);
+               else
+                       val = PIDX_T5_V(q->pend_cred / 8) |
+                               DBTYPE_F;
+               val |= DBPRIO_F;
                wmb();
 
                /* If we don't have access to the new User Doorbell (T5+), use
@@ -532,10 +538,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
                 * mechanism.
                 */
                if (unlikely(q->bar2_addr == NULL)) {
-                       t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                                    val | QID(q->cntxt_id));
+                       t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                                    val | QID_V(q->cntxt_id));
                } else {
-                       writel(val | QID(q->bar2_qid),
+                       writel(val | QID_V(q->bar2_qid),
                               q->bar2_addr + SGE_UDB_KDOORBELL);
 
                        /* This Write memory Barrier will force the write to
@@ -818,7 +824,8 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
                sgl->addr0 = cpu_to_be64(addr[1]);
        }
 
-       sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+       sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+                             ULPTX_NSGE_V(nfrags));
        if (likely(--nfrags == 0))
                return;
        /*
@@ -884,7 +891,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(q->bar2_addr == NULL)) {
-               u32 val = PIDX(n);
+               u32 val = PIDX_V(n);
                unsigned long flags;
 
                /* For T4 we need to participate in the Doorbell Recovery
@@ -892,14 +899,14 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
                 */
                spin_lock_irqsave(&q->db_lock, flags);
                if (!q->db_disabled)
-                       t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                                    QID(q->cntxt_id) | val);
+                       t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
+                                    QID_V(q->cntxt_id) | val);
                else
                        q->db_pidx_inc += n;
                q->db_pidx = q->pidx;
                spin_unlock_irqrestore(&q->db_lock, flags);
        } else {
-               u32 val = PIDX_T5(n);
+               u32 val = PIDX_T5_V(n);
 
                /* T4 and later chips share the same PIDX field offset within
                 * the doorbell, but T5 and later shrank the field in order to
@@ -907,7 +914,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
                 * large in the first place (14 bits) so we just use the T5
                 * and later limits and warn if a Queue ID is too large.
                 */
-               WARN_ON(val & DBPRIO(1));
+               WARN_ON(val & DBPRIO_F);
 
                /* If we're only writing a single TX Descriptor and we can use
                 * Inferred QID registers, we can use the Write Combining
@@ -923,7 +930,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
                                      (q->bar2_addr + SGE_UDB_WCDOORBELL),
                                      wr);
                } else {
-                       writel(val | QID(q->bar2_qid),
+                       writel(val | QID_V(q->bar2_qid),
                               q->bar2_addr + SGE_UDB_KDOORBELL);
                }
 
@@ -1150,9 +1157,9 @@ out_free: dev_kfree_skb_any(skb);
                        cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
-               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
        }
 
        cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
@@ -1716,6 +1723,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
        skb->truesize += skb->data_len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxq->rspq.idx);
+       skb_mark_napi_id(skb, &rxq->rspq.napi);
        if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
                             PKT_HASH_TYPE_L3);
@@ -1758,7 +1766,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
        pkt = (const struct cpl_rx_pkt *)rsp;
        csum_ok = pkt->csum_calc && !pkt->err_vec &&
                  (q->netdev->features & NETIF_F_RXCSUM);
-       if ((pkt->l2info & htonl(RXF_TCP)) &&
+       if ((pkt->l2info & htonl(RXF_TCP_F)) &&
+           !(cxgb_poll_busy_polling(q)) &&
            (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
                do_gro(rxq, si, pkt);
                return 0;
@@ -1780,11 +1789,11 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 
        rxq->stats.pkts++;
 
-       if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+       if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
                if (!pkt->ip_frag) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        rxq->stats.rx_cso++;
-               } else if (pkt->l2info & htonl(RXF_IP)) {
+               } else if (pkt->l2info & htonl(RXF_IP_F)) {
                        __sum16 c = (__force __sum16)pkt->csum;
                        skb->csum = csum_unfold(c);
                        skb->ip_summed = CHECKSUM_COMPLETE;
@@ -1797,6 +1806,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
                rxq->stats.vlan_ex++;
        }
+       skb_mark_napi_id(skb, &q->napi);
        netif_receive_skb(skb);
        return 0;
 }
@@ -1959,6 +1969,38 @@ static int process_responses(struct sge_rspq *q, int budget)
        return budget - budget_left;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+int cxgb_busy_poll(struct napi_struct *napi)
+{
+       struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+       unsigned int params, work_done;
+       u32 val;
+
+       if (!cxgb_poll_lock_poll(q))
+               return LL_FLUSH_BUSY;
+
+       work_done = process_responses(q, 4);
+       params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+       q->next_intr_params = params;
+       val = CIDXINC_V(work_done) | SEINTARM_V(params);
+
+       /* If we don't have access to the new User GTS (T5+), use the old
+        * doorbell mechanism; otherwise use the new BAR2 mechanism.
+        */
+       if (unlikely(!q->bar2_addr))
+               t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+                            val | INGRESSQID_V((u32)q->cntxt_id));
+       else {
+               writel(val | INGRESSQID_V(q->bar2_qid),
+                      q->bar2_addr + SGE_UDB_GTS);
+               wmb();
+       }
+
+       cxgb_poll_unlock_poll(q);
+       return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  *     napi_rx_handler - the NAPI handler for Rx processing
  *     @napi: the napi instance
@@ -1974,9 +2016,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
 {
        unsigned int params;
        struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
-       int work_done = process_responses(q, budget);
+       int work_done;
        u32 val;
 
+       if (!cxgb_poll_lock_napi(q))
+               return budget;
+
+       work_done = process_responses(q, budget);
        if (likely(work_done < budget)) {
                int timer_index;
 
@@ -2001,19 +2047,20 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
        } else
                params = QINTR_TIMER_IDX(7);
 
-       val = CIDXINC(work_done) | SEINTARM(params);
+       val = CIDXINC_V(work_done) | SEINTARM_V(params);
 
        /* If we don't have access to the new User GTS (T5+), use the old
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(q->bar2_addr == NULL)) {
-               t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS),
-                            val | INGRESSQID((u32)q->cntxt_id));
+               t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
+                            val | INGRESSQID_V((u32)q->cntxt_id));
        } else {
-               writel(val | INGRESSQID(q->bar2_qid),
+               writel(val | INGRESSQID_V(q->bar2_qid),
                       q->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
+       cxgb_poll_unlock_napi(q);
        return work_done;
 }
 
@@ -2056,16 +2103,16 @@ static unsigned int process_intrq(struct adapter *adap)
                rspq_next(q);
        }
 
-       val =  CIDXINC(credits) | SEINTARM(q->intr_params);
+       val =  CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
 
        /* If we don't have access to the new User GTS (T5+), use the old
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(q->bar2_addr == NULL)) {
-               t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
-                            val | INGRESSQID(q->cntxt_id));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
+                            val | INGRESSQID_V(q->cntxt_id));
        } else {
-               writel(val | INGRESSQID(q->bar2_qid),
+               writel(val | INGRESSQID_V(q->bar2_qid),
                       q->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
@@ -2095,7 +2142,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
 {
        struct adapter *adap = cookie;
 
-       t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
+       t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
        if (t4_slow_intr_handler(adap) | process_intrq(adap))
                return IRQ_HANDLED;
        return IRQ_NONE;             /* probably shared interrupt */
@@ -2142,9 +2189,9 @@ static void sge_rx_timer_cb(unsigned long data)
                        }
                }
 
-       t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
-       idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
-       idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+       t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
+       idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
+       idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
 
        for (i = 0; i < 2; i++) {
                u32 debug0, debug11;
@@ -2188,12 +2235,12 @@ static void sge_rx_timer_cb(unsigned long data)
                /* Read and save the SGE IDMA State and Queue ID information.
                 * We do this every time in case it changes across time ...
                 */
-               t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
-               debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+               t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
+               debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
                s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
 
-               t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
-               debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+               t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
+               debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
                s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
 
                CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
@@ -2337,6 +2384,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                goto err;
 
        netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+       napi_hash_add(&iq->napi);
        iq->cur_desc = iq->desc;
        iq->cidx = 0;
        iq->gen = 1;
@@ -2594,6 +2642,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
                   rq->cntxt_id, fl_id, 0xffff);
        dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
                          rq->desc, rq->phys_addr);
+       napi_hash_del(&rq->napi);
        netif_napi_del(&rq->napi);
        rq->netdev = NULL;
        rq->cntxt_id = rq->abs_id = 0;
@@ -2738,24 +2787,11 @@ void t4_sge_stop(struct adapter *adap)
 }
 
 /**
- *     t4_sge_init - initialize SGE
+ *     t4_sge_init_soft - grab core SGE values needed by SGE code
  *     @adap: the adapter
  *
- *     Performs SGE initialization needed every time after a chip reset.
- *     We do not initialize any of the queues here, instead the driver
- *     top-level must request them individually.
- *
- *     Called in two different modes:
- *
- *      1. Perform actual hardware initialization and record hard-coded
- *         parameters which were used.  This gets used when we're the
- *         Master PF and the Firmware Configuration File support didn't
- *         work for some reason.
- *
- *      2. We're not the Master PF or initialization was performed with
- *         a Firmware Configuration File.  In this case we need to grab
- *         any of the SGE operating parameters that we need to have in
- *         order to do our job and make sure we can live with them ...
+ *     We need to grab the SGE operating parameters that we need to have
+ *     in order to do our job and make sure we can live with them.
  */
 
 static int t4_sge_init_soft(struct adapter *adap)
@@ -2770,8 +2806,8 @@ static int t4_sge_init_soft(struct adapter *adap)
         * process_responses() and that only packet data is going to the
         * Free Lists.
         */
-       if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
-           RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+       if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
+           RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
                dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
                return -EINVAL;
        }
@@ -2785,7 +2821,7 @@ static int t4_sge_init_soft(struct adapter *adap)
         * XXX meet our needs!
         */
        #define READ_FL_BUF(x) \
-               t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
+               t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
 
        fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
        fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
@@ -2823,99 +2859,38 @@ static int t4_sge_init_soft(struct adapter *adap)
         * Retrieve our RX interrupt holdoff timer values and counter
         * threshold values from the SGE parameters.
         */
-       timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
-       timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
-       timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
+       timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
+       timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
+       timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
        s->timer_val[0] = core_ticks_to_us(adap,
-               TIMERVALUE0_GET(timer_value_0_and_1));
+               TIMERVALUE0_G(timer_value_0_and_1));
        s->timer_val[1] = core_ticks_to_us(adap,
-               TIMERVALUE1_GET(timer_value_0_and_1));
+               TIMERVALUE1_G(timer_value_0_and_1));
        s->timer_val[2] = core_ticks_to_us(adap,
-               TIMERVALUE2_GET(timer_value_2_and_3));
+               TIMERVALUE2_G(timer_value_2_and_3));
        s->timer_val[3] = core_ticks_to_us(adap,
-               TIMERVALUE3_GET(timer_value_2_and_3));
+               TIMERVALUE3_G(timer_value_2_and_3));
        s->timer_val[4] = core_ticks_to_us(adap,
-               TIMERVALUE4_GET(timer_value_4_and_5));
+               TIMERVALUE4_G(timer_value_4_and_5));
        s->timer_val[5] = core_ticks_to_us(adap,
-               TIMERVALUE5_GET(timer_value_4_and_5));
+               TIMERVALUE5_G(timer_value_4_and_5));
 
-       ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
-       s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
-       s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
-       s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
-       s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
-
-       return 0;
-}
-
-static int t4_sge_init_hard(struct adapter *adap)
-{
-       struct sge *s = &adap->sge;
-
-       /*
-        * Set up our basic SGE mode to deliver CPL messages to our Ingress
-        * Queue and Packet Date to the Free List.
-        */
-       t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
-                        RXPKTCPLMODE_MASK);
-
-       /*
-        * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
-        * and generate an interrupt when this occurs so we can recover.
-        */
-       if (is_t4(adap->params.chip)) {
-               t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
-                                V_HP_INT_THRESH(M_HP_INT_THRESH) |
-                                V_LP_INT_THRESH(M_LP_INT_THRESH),
-                                V_HP_INT_THRESH(dbfifo_int_thresh) |
-                                V_LP_INT_THRESH(dbfifo_int_thresh));
-       } else {
-               t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
-                                V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
-                                V_LP_INT_THRESH_T5(dbfifo_int_thresh));
-               t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
-                                V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
-                                V_HP_INT_THRESH_T5(dbfifo_int_thresh));
-       }
-       t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
-                       F_ENABLE_DROP);
-
-       /*
-        * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
-        * t4_fixup_host_params().
-        */
-       s->fl_pg_order = FL_PG_ORDER;
-       if (s->fl_pg_order)
-               t4_write_reg(adap,
-                            SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
-                            PAGE_SIZE << FL_PG_ORDER);
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
-                    FL_MTU_SMALL_BUFSIZE(adap));
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
-                    FL_MTU_LARGE_BUFSIZE(adap));
-
-       /*
-        * Note that the SGE Ingress Packet Count Interrupt Threshold and
-        * Timer Holdoff values must be supplied by our caller.
-        */
-       t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
-                    THRESHOLD_0(s->counter_val[0]) |
-                    THRESHOLD_1(s->counter_val[1]) |
-                    THRESHOLD_2(s->counter_val[2]) |
-                    THRESHOLD_3(s->counter_val[3]));
-       t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
-                    TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
-                    TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
-       t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
-                    TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
-                    TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
-       t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
-                    TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
-                    TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
+       ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
+       s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+       s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+       s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+       s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
 
        return 0;
 }
 
+/**
+ *     t4_sge_init - initialize SGE
+ *     @adap: the adapter
+ *
+ *     Perform low-level SGE code initialization needed every time after a
+ *     chip reset.
+ */
 int t4_sge_init(struct adapter *adap)
 {
        struct sge *s = &adap->sge;
@@ -2927,9 +2902,9 @@ int t4_sge_init(struct adapter *adap)
         * Ingress Padding Boundary and Egress Status Page Size are set up by
         * t4_fixup_host_params().
         */
-       sge_control = t4_read_reg(adap, SGE_CONTROL);
-       s->pktshift = PKTSHIFT_GET(sge_control);
-       s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
+       sge_control = t4_read_reg(adap, SGE_CONTROL_A);
+       s->pktshift = PKTSHIFT_G(sge_control);
+       s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
 
        /* T4 uses a single control field to specify both the PCIe Padding and
         * Packing Boundary.  T5 introduced the ability to specify these
@@ -2937,8 +2912,8 @@ int t4_sge_init(struct adapter *adap)
         * within Packed Buffer Mode is the maximum of these two
         * specifications.
         */
-       ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
-                              X_INGPADBOUNDARY_SHIFT);
+       ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
+                              INGPADBOUNDARY_SHIFT_X);
        if (is_t4(adap->params.chip)) {
                s->fl_align = ingpadboundary;
        } else {
@@ -2956,10 +2931,7 @@ int t4_sge_init(struct adapter *adap)
                s->fl_align = max(ingpadboundary, ingpackboundary);
        }
 
-       if (adap->flags & USING_SOFT_PARAMS)
-               ret = t4_sge_init_soft(adap);
-       else
-               ret = t4_sge_init_hard(adap);
+       ret = t4_sge_init_soft(adap);
        if (ret < 0)
                return ret;
 
@@ -2975,11 +2947,11 @@ int t4_sge_init(struct adapter *adap)
         * buffers and a new field which only applies to Packed Mode Free List
         * buffers.
         */
-       sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+       sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
        if (is_t4(adap->params.chip))
-               egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+               egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
        else
-               egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+               egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
        s->fl_starve_thres = 2*egress_threshold + 1;
 
        setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
index c132d9030729d9e20f5f1051611fd35d46098128..2c13e8005319c2f244b6634db6a785d6a493d790 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/delay.h>
 #include "cxgb4.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4fw_api.h"
 
 /**
@@ -149,20 +150,20 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
  */
 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
 {
-       u32 req = ENABLE | FUNCTION(adap->fn) | reg;
+       u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
 
        if (is_t4(adap->params.chip))
-               req |= F_LOCALCFG;
+               req |= LOCALCFG_F;
 
-       t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
-       *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
+       t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
+       *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
 
        /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
         * Configuration Space read.  (None of the other fields matter when
         * ENABLE is 0 so a simple register write is easier than a
         * read-modify-write via t4_set_reg_field().)
         */
-       t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
+       t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
 }
 
 /*
@@ -187,8 +188,8 @@ static void t4_report_fw_error(struct adapter *adap)
        };
        u32 pcie_fw;
 
-       pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
-       if (pcie_fw & PCIE_FW_ERR)
+       pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+       if (pcie_fw & PCIE_FW_ERR_F)
                dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
                        reason[PCIE_FW_EVAL_G(pcie_fw)]);
 }
@@ -264,8 +265,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        u64 res;
        int i, ms, delay_idx;
        const __be64 *p = cmd;
-       u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
-       u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
+       u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
+       u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
 
        if ((size & 15) || size > MBOX_LEN)
                return -EINVAL;
@@ -277,9 +278,9 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        if (adap->pdev->error_state != pci_channel_io_normal)
                return -EIO;
 
-       v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+       v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
-               v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+               v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
 
        if (v != MBOX_OWNER_DRV)
                return v ? -EBUSY : -ETIMEDOUT;
@@ -287,7 +288,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        for (i = 0; i < size; i += 8)
                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 
-       t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+       t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
        t4_read_reg(adap, ctl_reg);          /* flush write */
 
        delay_idx = 0;
@@ -303,8 +304,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                        mdelay(ms);
 
                v = t4_read_reg(adap, ctl_reg);
-               if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
-                       if (!(v & MBMSGVALID)) {
+               if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
+                       if (!(v & MBMSGVALID_F)) {
                                t4_write_reg(adap, ctl_reg, 0);
                                continue;
                        }
@@ -350,27 +351,27 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
        u32 mc_bist_status_rdata, mc_bist_data_pattern;
 
        if (is_t4(adap->params.chip)) {
-               mc_bist_cmd = MC_BIST_CMD;
-               mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
-               mc_bist_cmd_len = MC_BIST_CMD_LEN;
-               mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
-               mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
+               mc_bist_cmd = MC_BIST_CMD_A;
+               mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
+               mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
+               mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
+               mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
        } else {
-               mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
-               mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
-               mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
-               mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
-               mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+               mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
+               mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
+               mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
+               mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
+               mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
        }
 
-       if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
+       if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
                return -EBUSY;
        t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
        t4_write_reg(adap, mc_bist_cmd_len, 64);
        t4_write_reg(adap, mc_bist_data_pattern, 0xc);
-       t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
-                    BIST_CMD_GAP(1));
-       i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
+       t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
+                    BIST_CMD_GAP_V(1));
+       i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
        if (i)
                return i;
 
@@ -403,31 +404,31 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
        u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
 
        if (is_t4(adap->params.chip)) {
-               edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
-               edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
-               edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
-               edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
-                                                   idx);
-               edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
+               edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
+               edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
+               edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
+               edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
                                                    idx);
+               edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
+                                               idx);
        } else {
-               edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
-               edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
-               edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+               edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+               edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+               edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
                edc_bist_cmd_data_pattern =
-                       EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+                       EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
                edc_bist_status_rdata =
-                        EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+                        EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
        }
 
-       if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
+       if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
                return -EBUSY;
        t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
        t4_write_reg(adap, edc_bist_cmd_len, 64);
        t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
        t4_write_reg(adap, edc_bist_cmd,
-                    BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
-       i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
+                    BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
+       i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
        if (i)
                return i;
 
@@ -505,13 +506,13 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
         * the address is relative to BAR0.
         */
        mem_reg = t4_read_reg(adap,
-                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
+                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
                                                  win));
-       mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
-       mem_base = GET_PCIEOFST(mem_reg) << 10;
+       mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
+       mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
        if (is_t4(adap->params.chip))
                mem_base -= adap->t4_bar0;
-       win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
+       win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
 
        /* Calculate our initial PCI-E Memory Window Position and Offset into
         * that Window.
@@ -524,10 +525,10 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
         * attempt to use the new value.)
         */
        t4_write_reg(adap,
-                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
+                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
                     pos | win_pf);
        t4_read_reg(adap,
-                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
 
        /* Transfer data to/from the adapter as long as there's an integral
         * number of 32-bit transfers to complete.
@@ -552,11 +553,11 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
                        pos += mem_aperture;
                        offset = 0;
                        t4_write_reg(adap,
-                                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
-                                                        win), pos | win_pf);
+                               PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+                                                   win), pos | win_pf);
                        t4_read_reg(adap,
-                                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
-                                                       win));
+                               PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
+                                                   win));
                }
        }
 
@@ -760,14 +761,13 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
 
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-       if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
+       if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;
-       cont = cont ? SF_CONT : 0;
-       lock = lock ? SF_LOCK : 0;
-       t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
-       ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
+       t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+                    SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
+       ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
        if (!ret)
-               *valp = t4_read_reg(adapter, SF_DATA);
+               *valp = t4_read_reg(adapter, SF_DATA_A);
        return ret;
 }
 
@@ -788,14 +788,12 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
 {
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-       if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
+       if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;
-       cont = cont ? SF_CONT : 0;
-       lock = lock ? SF_LOCK : 0;
-       t4_write_reg(adapter, SF_DATA, val);
-       t4_write_reg(adapter, SF_OP, lock |
-                    cont | BYTECNT(byte_cnt - 1) | OP_WR);
-       return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
+       t4_write_reg(adapter, SF_DATA_A, val);
+       t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
+                    SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
+       return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
 }
 
 /**
@@ -837,8 +835,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
  *     (i.e., big-endian), otherwise as 32-bit words in the platform's
  *     natural endianess.
  */
-static int t4_read_flash(struct adapter *adapter, unsigned int addr,
-                        unsigned int nwords, u32 *data, int byte_oriented)
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+                 unsigned int nwords, u32 *data, int byte_oriented)
 {
        int ret;
 
@@ -854,7 +852,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
        for ( ; nwords; nwords--, data++) {
                ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
                if (nwords == 1)
-                       t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+                       t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
                if (ret)
                        return ret;
                if (byte_oriented)
@@ -902,7 +900,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
        if (ret)
                goto unlock;
 
-       t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+       t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
 
        /* Read the page to verify the write succeeded */
        ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -918,7 +916,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
        return 0;
 
 unlock:
-       t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+       t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
        return ret;
 }
 
@@ -1113,7 +1111,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
                }
                start++;
        }
-       t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+       t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
        return ret;
 }
 
@@ -1241,6 +1239,45 @@ out:
        return ret;
 }
 
+/**
+ *     t4_fwcache - firmware cache operation
+ *     @adap: the adapter
+ *     @op  : the operation (flush or flush and invalidate)
+ */
+int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
+{
+       struct fw_params_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_vfn =
+               cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+                           FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                           FW_PARAMS_CMD_PFN_V(adap->fn) |
+                           FW_PARAMS_CMD_VFN_V(0));
+       c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+       c.param[0].mnem =
+               cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+                           FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
+       c.param[0].val = (__force __be32)op;
+
+       return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
+}
+
+void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
+{
+       unsigned int i, j;
+
+       for (i = 0; i < 8; i++) {
+               u32 *p = la_buf + i;
+
+               t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
+               j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
+               t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
+               for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
+                       *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
+       }
+}
+
 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
                     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
                     FW_PORT_CAP_ANEG)
@@ -1365,95 +1402,97 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
 static void pcie_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info sysbus_intr_info[] = {
-               { RNPP, "RXNP array parity error", -1, 1 },
-               { RPCP, "RXPC array parity error", -1, 1 },
-               { RCIP, "RXCIF array parity error", -1, 1 },
-               { RCCP, "Rx completions control array parity error", -1, 1 },
-               { RFTP, "RXFT array parity error", -1, 1 },
+               { RNPP_F, "RXNP array parity error", -1, 1 },
+               { RPCP_F, "RXPC array parity error", -1, 1 },
+               { RCIP_F, "RXCIF array parity error", -1, 1 },
+               { RCCP_F, "Rx completions control array parity error", -1, 1 },
+               { RFTP_F, "RXFT array parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info pcie_port_intr_info[] = {
-               { TPCP, "TXPC array parity error", -1, 1 },
-               { TNPP, "TXNP array parity error", -1, 1 },
-               { TFTP, "TXFT array parity error", -1, 1 },
-               { TCAP, "TXCA array parity error", -1, 1 },
-               { TCIP, "TXCIF array parity error", -1, 1 },
-               { RCAP, "RXCA array parity error", -1, 1 },
-               { OTDD, "outbound request TLP discarded", -1, 1 },
-               { RDPE, "Rx data parity error", -1, 1 },
-               { TDUE, "Tx uncorrectable data error", -1, 1 },
+               { TPCP_F, "TXPC array parity error", -1, 1 },
+               { TNPP_F, "TXNP array parity error", -1, 1 },
+               { TFTP_F, "TXFT array parity error", -1, 1 },
+               { TCAP_F, "TXCA array parity error", -1, 1 },
+               { TCIP_F, "TXCIF array parity error", -1, 1 },
+               { RCAP_F, "RXCA array parity error", -1, 1 },
+               { OTDD_F, "outbound request TLP discarded", -1, 1 },
+               { RDPE_F, "Rx data parity error", -1, 1 },
+               { TDUE_F, "Tx uncorrectable data error", -1, 1 },
                { 0 }
        };
        static const struct intr_info pcie_intr_info[] = {
-               { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
-               { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
-               { MSIDATAPERR, "MSI data parity error", -1, 1 },
-               { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
-               { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
-               { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
-               { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
-               { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
-               { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
-               { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
-               { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
-               { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
-               { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
-               { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
-               { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
-               { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
-               { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
-               { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
-               { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
-               { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
-               { FIDPERR, "PCI FID parity error", -1, 1 },
-               { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
-               { MATAGPERR, "PCI MA tag parity error", -1, 1 },
-               { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
-               { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
-               { RXWRPERR, "PCI Rx write parity error", -1, 1 },
-               { RPLPERR, "PCI replay buffer parity error", -1, 1 },
-               { PCIESINT, "PCI core secondary fault", -1, 1 },
-               { PCIEPINT, "PCI core primary fault", -1, 1 },
-               { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
+               { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
+               { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
+               { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
+               { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+               { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+               { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+               { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+               { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
+               { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
+               { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+               { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
+               { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+               { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+               { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
+               { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+               { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+               { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
+               { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+               { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+               { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+               { FIDPERR_F, "PCI FID parity error", -1, 1 },
+               { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
+               { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
+               { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+               { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
+               { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
+               { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
+               { PCIESINT_F, "PCI core secondary fault", -1, 1 },
+               { PCIEPINT_F, "PCI core primary fault", -1, 1 },
+               { UNXSPLCPLERR_F, "PCI unexpected split completion error",
+                 -1, 0 },
                { 0 }
        };
 
        static struct intr_info t5_pcie_intr_info[] = {
-               { MSTGRPPERR, "Master Response Read Queue parity error",
+               { MSTGRPPERR_F, "Master Response Read Queue parity error",
+                 -1, 1 },
+               { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+               { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+               { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+               { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+               { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+               { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+               { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
                  -1, 1 },
-               { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
-               { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
-               { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
-               { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
-               { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
-               { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
-               { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+               { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
                  -1, 1 },
-               { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+               { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+               { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+               { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+               { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+               { DREQWRPERR_F, "PCI DMA channel write request parity error",
                  -1, 1 },
-               { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
-               { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
-               { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
-               { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
-               { DREQWRPERR, "PCI DMA channel write request parity error",
+               { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+               { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+               { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+               { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+               { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+               { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+               { FIDPERR_F, "PCI FID parity error", -1, 1 },
+               { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+               { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+               { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+               { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
                  -1, 1 },
-               { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
-               { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
-               { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
-               { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
-               { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
-               { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
-               { FIDPERR, "PCI FID parity error", -1, 1 },
-               { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
-               { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
-               { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
-               { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+               { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
                  -1, 1 },
-               { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
-               { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
-               { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
-               { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
-               { READRSPERR, "Outbound read error", -1, 0 },
+               { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+               { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+               { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+               { READRSPERR_F, "Outbound read error", -1, 0 },
                { 0 }
        };
 
@@ -1461,15 +1500,15 @@ static void pcie_intr_handler(struct adapter *adapter)
 
        if (is_t4(adapter->params.chip))
                fat = t4_handle_intr_status(adapter,
-                                           PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
-                                           sysbus_intr_info) +
+                               PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
+                               sysbus_intr_info) +
                        t4_handle_intr_status(adapter,
-                                             PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
-                                             pcie_port_intr_info) +
-                       t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+                                       PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
+                                       pcie_port_intr_info) +
+                       t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
                                              pcie_intr_info);
        else
-               fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+               fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
                                            t5_pcie_intr_info);
 
        if (fat)
@@ -1483,11 +1522,11 @@ static void tp_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info tp_intr_info[] = {
                { 0x3fffffff, "TP parity error", -1, 1 },
-               { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+               { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
+       if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1499,102 +1538,107 @@ static void sge_intr_handler(struct adapter *adapter)
        u64 v;
 
        static const struct intr_info sge_intr_info[] = {
-               { ERR_CPL_EXCEED_IQE_SIZE,
+               { ERR_CPL_EXCEED_IQE_SIZE_F,
                  "SGE received CPL exceeding IQE size", -1, 1 },
-               { ERR_INVALID_CIDX_INC,
+               { ERR_INVALID_CIDX_INC_F,
                  "SGE GTS CIDX increment too large", -1, 0 },
-               { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
-               { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
-               { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
-               { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
-               { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+               { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+               { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
+               { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+               { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+               { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
-               { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
                  0 },
-               { ERR_ING_CTXT_PRIO,
+               { ERR_ING_CTXT_PRIO_F,
                  "SGE too many priority ingress contexts", -1, 0 },
-               { ERR_EGR_CTXT_PRIO,
+               { ERR_EGR_CTXT_PRIO_F,
                  "SGE too many priority egress contexts", -1, 0 },
-               { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
-               { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+               { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+               { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
                { 0 }
        };
 
-       v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
-               ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
+       v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
+               ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
        if (v) {
                dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
                                (unsigned long long)v);
-               t4_write_reg(adapter, SGE_INT_CAUSE1, v);
-               t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
+               t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
+               t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
        }
 
-       if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
+       if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
            v != 0)
                t4_fatal_err(adapter);
 }
 
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+                     OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+                     IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
+
 /*
  * CIM interrupt handler.
  */
 static void cim_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info cim_intr_info[] = {
-               { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
-               { OBQPARERR, "CIM OBQ parity error", -1, 1 },
-               { IBQPARERR, "CIM IBQ parity error", -1, 1 },
-               { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
-               { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
-               { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
-               { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+               { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
+               { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+               { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+               { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+               { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+               { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+               { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info cim_upintr_info[] = {
-               { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
-               { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
-               { ILLWRINT, "CIM illegal write", -1, 1 },
-               { ILLRDINT, "CIM illegal read", -1, 1 },
-               { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
-               { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
-               { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
-               { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
-               { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
-               { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
-               { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
-               { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
-               { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
-               { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
-               { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
-               { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
-               { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
-               { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
-               { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
-               { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
-               { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
-               { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
-               { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
-               { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
-               { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
-               { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
-               { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
-               { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+               { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+               { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+               { ILLWRINT_F, "CIM illegal write", -1, 1 },
+               { ILLRDINT_F, "CIM illegal read", -1, 1 },
+               { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+               { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+               { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+               { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+               { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+               { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+               { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+               { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+               { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+               { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+               { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+               { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+               { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+               { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+               { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+               { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+               { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+               { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+               { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+               { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+               { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+               { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+               { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+               { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
                { 0 }
        };
 
        int fat;
 
-       if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR)
+       if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
                t4_report_fw_error(adapter);
 
-       fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
+       fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
                                    cim_intr_info) +
-             t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
+             t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
                                    cim_upintr_info);
        if (fat)
                t4_fatal_err(adapter);
@@ -1611,7 +1655,7 @@ static void ulprx_intr_handler(struct adapter *adapter)
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
+       if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1621,19 +1665,19 @@ static void ulprx_intr_handler(struct adapter *adapter)
 static void ulptx_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info ulptx_intr_info[] = {
-               { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
                  0 },
                { 0xfffffff, "ULPTX parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
+       if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1643,19 +1687,20 @@ static void ulptx_intr_handler(struct adapter *adapter)
 static void pmtx_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info pmtx_intr_info[] = {
-               { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
-               { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
-               { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
-               { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
-               { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
-               { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
-               { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
-               { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
-               { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+               { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+               { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+               { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+               { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
+               { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
+               { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+               { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
+                 -1, 1 },
+               { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+               { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
+       if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1665,16 +1710,17 @@ static void pmtx_intr_handler(struct adapter *adapter)
 static void pmrx_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info pmrx_intr_info[] = {
-               { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
-               { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
-               { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
-               { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
-               { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
-               { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+               { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
+               { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
+               { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+               { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
+                 -1, 1 },
+               { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+               { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
+       if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1684,16 +1730,16 @@ static void pmrx_intr_handler(struct adapter *adapter)
 static void cplsw_intr_handler(struct adapter *adapter)
 {
        static const struct intr_info cplsw_intr_info[] = {
-               { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
-               { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
-               { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
-               { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
-               { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
-               { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+               { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+               { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+               { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+               { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+               { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+               { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
+       if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
                t4_fatal_err(adapter);
 }
 
@@ -1703,15 +1749,15 @@ static void cplsw_intr_handler(struct adapter *adapter)
 static void le_intr_handler(struct adapter *adap)
 {
        static const struct intr_info le_intr_info[] = {
-               { LIPMISS, "LE LIP miss", -1, 0 },
-               { LIP0, "LE 0 LIP error", -1, 0 },
-               { PARITYERR, "LE parity error", -1, 1 },
-               { UNKNOWNCMD, "LE unknown command", -1, 1 },
-               { REQQPARERR, "LE request queue parity error", -1, 1 },
+               { LIPMISS_F, "LE LIP miss", -1, 0 },
+               { LIP0_F, "LE 0 LIP error", -1, 0 },
+               { PARITYERR_F, "LE parity error", -1, 1 },
+               { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+               { REQQPARERR_F, "LE request queue parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
+       if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
                t4_fatal_err(adap);
 }
 
@@ -1725,19 +1771,22 @@ static void mps_intr_handler(struct adapter *adapter)
                { 0 }
        };
        static const struct intr_info mps_tx_intr_info[] = {
-               { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
-               { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
-               { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
-               { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
-               { BUBBLE, "MPS Tx underflow", -1, 1 },
-               { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
-               { FRMERR, "MPS Tx framing error", -1, 1 },
+               { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+               { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+               { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+                 -1, 1 },
+               { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+                 -1, 1 },
+               { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+               { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+               { FRMERR_F, "MPS Tx framing error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_trc_intr_info[] = {
-               { FILTMEM, "MPS TRC filter parity error", -1, 1 },
-               { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
-               { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+               { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+               { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+                 -1, 1 },
+               { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_stat_sram_intr_info[] = {
@@ -1753,37 +1802,37 @@ static void mps_intr_handler(struct adapter *adapter)
                { 0 }
        };
        static const struct intr_info mps_cls_intr_info[] = {
-               { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
-               { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
-               { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+               { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+               { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+               { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
                { 0 }
        };
 
        int fat;
 
-       fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
+       fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
                                    mps_rx_intr_info) +
-             t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
+             t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
                                    mps_tx_intr_info) +
-             t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
+             t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
                                    mps_trc_intr_info) +
-             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
+             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
                                    mps_stat_sram_intr_info) +
-             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
                                    mps_stat_tx_intr_info) +
-             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+             t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
                                    mps_stat_rx_intr_info) +
-             t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
+             t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
                                    mps_cls_intr_info);
 
-       t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
-                    RXINT | TXINT | STATINT);
-       t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
+       t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
+       t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
        if (fat)
                t4_fatal_err(adapter);
 }
 
-#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+                     ECC_UE_INT_CAUSE_F)
 
 /*
  * EDC/MC interrupt handler.
@@ -1795,40 +1844,40 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
        unsigned int addr, cnt_addr, v;
 
        if (idx <= MEM_EDC1) {
-               addr = EDC_REG(EDC_INT_CAUSE, idx);
-               cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+               addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+               cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
        } else if (idx == MEM_MC) {
                if (is_t4(adapter->params.chip)) {
-                       addr = MC_INT_CAUSE;
-                       cnt_addr = MC_ECC_STATUS;
+                       addr = MC_INT_CAUSE_A;
+                       cnt_addr = MC_ECC_STATUS_A;
                } else {
-                       addr = MC_P_INT_CAUSE;
-                       cnt_addr = MC_P_ECC_STATUS;
+                       addr = MC_P_INT_CAUSE_A;
+                       cnt_addr = MC_P_ECC_STATUS_A;
                }
        } else {
-               addr = MC_REG(MC_P_INT_CAUSE, 1);
-               cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
+               addr = MC_REG(MC_P_INT_CAUSE_A, 1);
+               cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
        }
 
        v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
-       if (v & PERR_INT_CAUSE)
+       if (v & PERR_INT_CAUSE_F)
                dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
                          name[idx]);
-       if (v & ECC_CE_INT_CAUSE) {
-               u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
+       if (v & ECC_CE_INT_CAUSE_F) {
+               u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
 
-               t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
+               t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
                if (printk_ratelimit())
                        dev_warn(adapter->pdev_dev,
                                 "%u %s correctable ECC data error%s\n",
                                 cnt, name[idx], cnt > 1 ? "s" : "");
        }
-       if (v & ECC_UE_INT_CAUSE)
+       if (v & ECC_UE_INT_CAUSE_F)
                dev_alert(adapter->pdev_dev,
                          "%s uncorrectable ECC data error\n", name[idx]);
 
        t4_write_reg(adapter, addr, v);
-       if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+       if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
                t4_fatal_err(adapter);
 }
 
@@ -1837,26 +1886,26 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
  */
 static void ma_intr_handler(struct adapter *adap)
 {
-       u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
+       u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
 
-       if (status & MEM_PERR_INT_CAUSE) {
+       if (status & MEM_PERR_INT_CAUSE_F) {
                dev_alert(adap->pdev_dev,
                          "MA parity error, parity status %#x\n",
-                         t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+                         t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
                if (is_t5(adap->params.chip))
                        dev_alert(adap->pdev_dev,
                                  "MA parity error, parity status %#x\n",
                                  t4_read_reg(adap,
-                                             MA_PARITY_ERROR_STATUS2));
+                                             MA_PARITY_ERROR_STATUS2_A));
        }
-       if (status & MEM_WRAP_INT_CAUSE) {
-               v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
+       if (status & MEM_WRAP_INT_CAUSE_F) {
+               v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
                dev_alert(adap->pdev_dev, "MA address wrap-around error by "
                          "client %u to address %#x\n",
-                         MEM_WRAP_CLIENT_NUM_GET(v),
-                         MEM_WRAP_ADDRESS_GET(v) << 4);
+                         MEM_WRAP_CLIENT_NUM_G(v),
+                         MEM_WRAP_ADDRESS_G(v) << 4);
        }
-       t4_write_reg(adap, MA_INT_CAUSE, status);
+       t4_write_reg(adap, MA_INT_CAUSE_A, status);
        t4_fatal_err(adap);
 }
 
@@ -1866,13 +1915,13 @@ static void ma_intr_handler(struct adapter *adap)
 static void smb_intr_handler(struct adapter *adap)
 {
        static const struct intr_info smb_intr_info[] = {
-               { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
-               { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
-               { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+               { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+               { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+               { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
+       if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
                t4_fatal_err(adap);
 }
 
@@ -1882,14 +1931,14 @@ static void smb_intr_handler(struct adapter *adap)
 static void ncsi_intr_handler(struct adapter *adap)
 {
        static const struct intr_info ncsi_intr_info[] = {
-               { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
-               { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
-               { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
-               { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+               { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+               { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+               { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+               { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
+       if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
                t4_fatal_err(adap);
 }
 
@@ -1901,23 +1950,23 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
        u32 v, int_cause_reg;
 
        if (is_t4(adap->params.chip))
-               int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
+               int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
        else
-               int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
+               int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
 
        v = t4_read_reg(adap, int_cause_reg);
 
-       v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+       v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
        if (!v)
                return;
 
-       if (v & TXFIFO_PRTY_ERR)
+       if (v & TXFIFO_PRTY_ERR_F)
                dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
                          port);
-       if (v & RXFIFO_PRTY_ERR)
+       if (v & RXFIFO_PRTY_ERR_F)
                dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
                          port);
-       t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
+       t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
        t4_fatal_err(adap);
 }
 
@@ -1927,19 +1976,19 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
 static void pl_intr_handler(struct adapter *adap)
 {
        static const struct intr_info pl_intr_info[] = {
-               { FATALPERR, "T4 fatal parity error", -1, 1 },
-               { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+               { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+               { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
                { 0 }
        };
 
-       if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
+       if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
                t4_fatal_err(adap);
 }
 
-#define PF_INTR_MASK (PFSW)
-#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
-               EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
-               CPL_SWITCH | SGE | ULP_TX)
+#define PF_INTR_MASK (PFSW_F)
+#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
+               EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
+               CPL_SWITCH_F | SGE_F | ULP_TX_F)
 
 /**
  *     t4_slow_intr_handler - control path interrupt handler
@@ -1951,60 +2000,60 @@ static void pl_intr_handler(struct adapter *adap)
  */
 int t4_slow_intr_handler(struct adapter *adapter)
 {
-       u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
+       u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
 
        if (!(cause & GLBL_INTR_MASK))
                return 0;
-       if (cause & CIM)
+       if (cause & CIM_F)
                cim_intr_handler(adapter);
-       if (cause & MPS)
+       if (cause & MPS_F)
                mps_intr_handler(adapter);
-       if (cause & NCSI)
+       if (cause & NCSI_F)
                ncsi_intr_handler(adapter);
-       if (cause & PL)
+       if (cause & PL_F)
                pl_intr_handler(adapter);
-       if (cause & SMB)
+       if (cause & SMB_F)
                smb_intr_handler(adapter);
-       if (cause & XGMAC0)
+       if (cause & XGMAC0_F)
                xgmac_intr_handler(adapter, 0);
-       if (cause & XGMAC1)
+       if (cause & XGMAC1_F)
                xgmac_intr_handler(adapter, 1);
-       if (cause & XGMAC_KR0)
+       if (cause & XGMAC_KR0_F)
                xgmac_intr_handler(adapter, 2);
-       if (cause & XGMAC_KR1)
+       if (cause & XGMAC_KR1_F)
                xgmac_intr_handler(adapter, 3);
-       if (cause & PCIE)
+       if (cause & PCIE_F)
                pcie_intr_handler(adapter);
-       if (cause & MC)
+       if (cause & MC_F)
                mem_intr_handler(adapter, MEM_MC);
-       if (!is_t4(adapter->params.chip) && (cause & MC1))
+       if (!is_t4(adapter->params.chip) && (cause & MC1_S))
                mem_intr_handler(adapter, MEM_MC1);
-       if (cause & EDC0)
+       if (cause & EDC0_F)
                mem_intr_handler(adapter, MEM_EDC0);
-       if (cause & EDC1)
+       if (cause & EDC1_F)
                mem_intr_handler(adapter, MEM_EDC1);
-       if (cause & LE)
+       if (cause & LE_F)
                le_intr_handler(adapter);
-       if (cause & TP)
+       if (cause & TP_F)
                tp_intr_handler(adapter);
-       if (cause & MA)
+       if (cause & MA_F)
                ma_intr_handler(adapter);
-       if (cause & PM_TX)
+       if (cause & PM_TX_F)
                pmtx_intr_handler(adapter);
-       if (cause & PM_RX)
+       if (cause & PM_RX_F)
                pmrx_intr_handler(adapter);
-       if (cause & ULP_RX)
+       if (cause & ULP_RX_F)
                ulprx_intr_handler(adapter);
-       if (cause & CPL_SWITCH)
+       if (cause & CPL_SWITCH_F)
                cplsw_intr_handler(adapter);
-       if (cause & SGE)
+       if (cause & SGE_F)
                sge_intr_handler(adapter);
-       if (cause & ULP_TX)
+       if (cause & ULP_TX_F)
                ulptx_intr_handler(adapter);
 
        /* Clear the interrupts just processed for which we are the master. */
-       t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
-       (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
+       t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
+       (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
        return 1;
 }
 
@@ -2023,19 +2072,19 @@ int t4_slow_intr_handler(struct adapter *adapter)
  */
 void t4_intr_enable(struct adapter *adapter)
 {
-       u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
-
-       t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
-                    ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
-                    ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
-                    ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
-                    ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
-                    ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
-                    ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
-                    DBFIFO_HP_INT | DBFIFO_LP_INT |
-                    EGRESS_SIZE_ERR);
-       t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
-       t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
+       u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+
+       t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
+                    ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
+                    ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+                    ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+                    ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+                    ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+                    ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
+                    DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
+                    EGRESS_SIZE_ERR_F);
+       t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
+       t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
 }
 
 /**
@@ -2048,10 +2097,10 @@ void t4_intr_enable(struct adapter *adapter)
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-       u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+       u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
 
-       t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
-       t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
+       t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
+       t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
 }
 
 /**
@@ -2166,6 +2215,147 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
        return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
 }
 
+/* Read an RSS table row */
+static int rd_rss_row(struct adapter *adap, int row, u32 *val)
+{
+       t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
+       return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
+                                  5, 0, val);
+}
+
+/**
+ *     t4_read_rss - read the contents of the RSS mapping table
+ *     @adapter: the adapter
+ *     @map: holds the contents of the RSS mapping table
+ *
+ *     Reads the contents of the RSS hash->queue mapping table.
+ */
+int t4_read_rss(struct adapter *adapter, u16 *map)
+{
+       u32 val;
+       int i, ret;
+
+       for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+               ret = rd_rss_row(adapter, i, &val);
+               if (ret)
+                       return ret;
+               *map++ = LKPTBLQUEUE0_G(val);
+               *map++ = LKPTBLQUEUE1_G(val);
+       }
+       return 0;
+}
+
+/**
+ *     t4_read_rss_key - read the global RSS key
+ *     @adap: the adapter
+ *     @key: 10-entry array holding the 320-bit RSS key
+ *
+ *     Reads the global 320-bit RSS key.
+ */
+void t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+       t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+                        TP_RSS_SECRET_KEY0_A);
+}
+
+/**
+ *     t4_write_rss_key - program one of the RSS keys
+ *     @adap: the adapter
+ *     @key: 10-entry array holding the 320-bit RSS key
+ *     @idx: which RSS key to write
+ *
+ *     Writes one of the RSS keys with the given 320-bit value.  If @idx is
+ *     0..15 the corresponding entry in the RSS key table is written,
+ *     otherwise the global RSS key is written.
+ */
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
+{
+       t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+                         TP_RSS_SECRET_KEY0_A);
+       if (idx >= 0 && idx < 16)
+               t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+                            KEYWRADDR_V(idx) | KEYWREN_F);
+}
+
+/**
+ *     t4_read_rss_pf_config - read PF RSS Configuration Table
+ *     @adapter: the adapter
+ *     @index: the entry in the PF RSS table to read
+ *     @valp: where to store the returned value
+ *
+ *     Reads the PF RSS Configuration Table at the specified index and returns
+ *     the value found there.
+ */
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
+                          u32 *valp)
+{
+       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                        valp, 1, TP_RSS_PF0_CONFIG_A + index);
+}
+
+/**
+ *     t4_read_rss_vf_config - read VF RSS Configuration Table
+ *     @adapter: the adapter
+ *     @index: the entry in the VF RSS table to read
+ *     @vfl: where to store the returned VFL
+ *     @vfh: where to store the returned VFH
+ *
+ *     Reads the VF RSS Configuration Table at the specified index and returns
+ *     the (VFL, VFH) values found there.
+ */
+void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
+                          u32 *vfl, u32 *vfh)
+{
+       u32 vrt, mask, data;
+
+       mask = VFWRADDR_V(VFWRADDR_M);
+       data = VFWRADDR_V(index);
+
+       /* Request that the index'th VF Table values be read into VFL/VFH.
+        */
+       vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
+       vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
+       vrt |= data | VFRDEN_F;
+       t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
+
+       /* Grab the VFL/VFH values ...
+        */
+       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                        vfl, 1, TP_RSS_VFL_CONFIG_A);
+       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                        vfh, 1, TP_RSS_VFH_CONFIG_A);
+}
+
+/**
+ *     t4_read_rss_pf_map - read PF RSS Map
+ *     @adapter: the adapter
+ *
+ *     Reads the PF RSS Map register and returns its value.
+ */
+u32 t4_read_rss_pf_map(struct adapter *adapter)
+{
+       u32 pfmap;
+
+       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                        &pfmap, 1, TP_RSS_PF_MAP_A);
+       return pfmap;
+}
+
+/**
+ *     t4_read_rss_pf_mask - read PF RSS Mask
+ *     @adapter: the adapter
+ *
+ *     Reads the PF RSS Mask register and returns its value.
+ */
+u32 t4_read_rss_pf_mask(struct adapter *adapter)
+{
+       u32 pfmask;
+
+       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                        &pfmask, 1, TP_RSS_PF_MSK_A);
+       return pfmask;
+}
+
 /**
  *     t4_tp_get_tcp_stats - read TP's TCP MIB counters
  *     @adap: the adapter
@@ -2178,23 +2368,23 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6)
 {
-       u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
+       u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
 
-#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
+#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
 #define STAT(x)     val[STAT_IDX(x)]
 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
 
        if (v4) {
-               t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
-                                ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+                                ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
                v4->tcpOutRsts = STAT(OUT_RST);
                v4->tcpInSegs  = STAT64(IN_SEG);
                v4->tcpOutSegs = STAT64(OUT_SEG);
                v4->tcpRetransSegs = STAT64(RXT_SEG);
        }
        if (v6) {
-               t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
-                                ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+                                ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
                v6->tcpOutRsts = STAT(OUT_RST);
                v6->tcpInSegs  = STAT64(IN_SEG);
                v6->tcpOutSegs = STAT64(OUT_SEG);
@@ -2219,15 +2409,36 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
        int i;
 
        for (i = 0; i < NMTUS; ++i) {
-               t4_write_reg(adap, TP_MTU_TABLE,
-                            MTUINDEX(0xff) | MTUVALUE(i));
-               v = t4_read_reg(adap, TP_MTU_TABLE);
-               mtus[i] = MTUVALUE_GET(v);
+               t4_write_reg(adap, TP_MTU_TABLE_A,
+                            MTUINDEX_V(0xff) | MTUVALUE_V(i));
+               v = t4_read_reg(adap, TP_MTU_TABLE_A);
+               mtus[i] = MTUVALUE_G(v);
                if (mtu_log)
-                       mtu_log[i] = MTUWIDTH_GET(v);
+                       mtu_log[i] = MTUWIDTH_G(v);
        }
 }
 
+/**
+ *     t4_read_cong_tbl - reads the congestion control table
+ *     @adap: the adapter
+ *     @incr: where to store the alpha values
+ *
+ *     Reads the additive increments programmed into the HW congestion
+ *     control table.
+ */
+void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
+{
+       unsigned int mtu, w;
+
+       for (mtu = 0; mtu < NMTUS; ++mtu)
+               for (w = 0; w < NCCTRL_WIN; ++w) {
+                       t4_write_reg(adap, TP_CCTRL_TABLE_A,
+                                    ROWINDEX_V(0xffff) | (mtu << 5) | w);
+                       incr[mtu][w] = (u16)t4_read_reg(adap,
+                                               TP_CCTRL_TABLE_A) & 0x1fff;
+               }
+}
+
 /**
  *     t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
  *     @adap: the adapter
@@ -2240,9 +2451,9 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
                            unsigned int mask, unsigned int val)
 {
-       t4_write_reg(adap, TP_PIO_ADDR, addr);
-       val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
-       t4_write_reg(adap, TP_PIO_DATA, val);
+       t4_write_reg(adap, TP_PIO_ADDR_A, addr);
+       val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
+       t4_write_reg(adap, TP_PIO_DATA_A, val);
 }
 
 /**
@@ -2321,8 +2532,8 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
 
                if (!(mtu & ((1 << log2) >> 2)))     /* round */
                        log2--;
-               t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
-                            MTUWIDTH(log2) | MTUVALUE(mtu));
+               t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
+                            MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
 
                for (w = 0; w < NCCTRL_WIN; ++w) {
                        unsigned int inc;
@@ -2330,12 +2541,66 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
                        inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
                                  CC_MIN_INCR);
 
-                       t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
+                       t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
                                     (w << 16) | (beta[w] << 13) | inc);
                }
        }
 }
 
+/**
+ *     t4_pmtx_get_stats - returns the HW stats from PMTX
+ *     @adap: the adapter
+ *     @cnt: where to store the count statistics
+ *     @cycles: where to store the cycle statistics
+ *
+ *     Returns performance statistics from PMTX.
+ */
+void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+       int i;
+       u32 data[2];
+
+       for (i = 0; i < PM_NSTATS; i++) {
+               t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
+               cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
+               if (is_t4(adap->params.chip)) {
+                       cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
+               } else {
+                       t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
+                                        PM_TX_DBG_DATA_A, data, 2,
+                                        PM_TX_DBG_STAT_MSB_A);
+                       cycles[i] = (((u64)data[0] << 32) | data[1]);
+               }
+       }
+}
+
+/**
+ *     t4_pmrx_get_stats - returns the HW stats from PMRX
+ *     @adap: the adapter
+ *     @cnt: where to store the count statistics
+ *     @cycles: where to store the cycle statistics
+ *
+ *     Returns performance statistics from PMRX.
+ */
+void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+       int i;
+       u32 data[2];
+
+       for (i = 0; i < PM_NSTATS; i++) {
+               t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
+               cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
+               if (is_t4(adap->params.chip)) {
+                       cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
+               } else {
+                       t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
+                                        PM_RX_DBG_DATA_A, data, 2,
+                                        PM_RX_DBG_STAT_MSB_A);
+                       cycles[i] = (((u64)data[0] << 32) | data[1]);
+               }
+       }
+}
+
 /**
  *     get_mps_bg_map - return the buffer groups associated with a port
  *     @adap: the adapter
@@ -2347,7 +2612,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
  */
 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
 {
-       u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
+       u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
 
        if (n == 0)
                return idx == 0 ? 0xf : 0;
@@ -2485,11 +2750,11 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
        if (is_t4(adap->params.chip)) {
                mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
                mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
-               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
        } else {
                mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
                mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
-               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
        }
 
        if (addr) {
@@ -2499,8 +2764,8 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
                t4_write_reg(adap, mag_id_reg_h,
                             (addr[0] << 8) | addr[1]);
        }
-       t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
-                        addr ? MAGICEN : 0);
+       t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
+                        addr ? MAGICEN_F : 0);
 }
 
 /**
@@ -2525,20 +2790,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
        u32 port_cfg_reg;
 
        if (is_t4(adap->params.chip))
-               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
        else
-               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
 
        if (!enable) {
-               t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
+               t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
                return 0;
        }
        if (map > 0xff)
                return -EINVAL;
 
 #define EPIO_REG(name) \
-       (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
-       T5_PORT_REG(port, MAC_PORT_EPIO_##name))
+       (is_t4(adap->params.chip) ? \
+        PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
+        T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
 
        t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
        t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2550,21 +2816,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
 
                /* write byte masks */
                t4_write_reg(adap, EPIO_REG(DATA0), mask0);
-               t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
+               t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
                t4_read_reg(adap, EPIO_REG(OP));                /* flush */
-               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
+               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
                        return -ETIMEDOUT;
 
                /* write CRC */
                t4_write_reg(adap, EPIO_REG(DATA0), crc);
-               t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
+               t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
                t4_read_reg(adap, EPIO_REG(OP));                /* flush */
-               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
+               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
                        return -ETIMEDOUT;
        }
 #undef EPIO_REG
 
-       t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
+       t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
        return 0;
 }
 
@@ -2749,9 +3015,9 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
                "IDMA_FL_SEND_COMPLETION_TO_IMSG",
        };
        static const u32 sge_regs[] = {
-               SGE_DEBUG_DATA_LOW_INDEX_2,
-               SGE_DEBUG_DATA_LOW_INDEX_3,
-               SGE_DEBUG_DATA_HIGH_INDEX_10,
+               SGE_DEBUG_DATA_LOW_INDEX_2_A,
+               SGE_DEBUG_DATA_LOW_INDEX_3_A,
+               SGE_DEBUG_DATA_HIGH_INDEX_10_A,
        };
        const char **sge_idma_decode;
        int sge_idma_decode_nstates;
@@ -2818,7 +3084,7 @@ retry:
        if (ret < 0) {
                if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
                        goto retry;
-               if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR)
+               if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
                        t4_report_fw_error(adap);
                return ret;
        }
@@ -2868,8 +3134,8 @@ retry:
                         * timeout ... and then retry if we haven't exhausted
                         * our retries ...
                         */
-                       pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
-                       if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+                       pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+                       if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
                                if (waiting <= 0) {
                                        if (retries-- > 0)
                                                goto retry;
@@ -2884,9 +3150,9 @@ retry:
                         * report errors preferentially.
                         */
                        if (state) {
-                               if (pcie_fw & PCIE_FW_ERR)
+                               if (pcie_fw & PCIE_FW_ERR_F)
                                        *state = DEV_STATE_ERR;
-                               else if (pcie_fw & PCIE_FW_INIT)
+                               else if (pcie_fw & PCIE_FW_INIT_F)
                                        *state = DEV_STATE_INIT;
                        }
 
@@ -2896,7 +3162,7 @@ retry:
                         * for our caller.
                         */
                        if (master_mbox == PCIE_FW_MASTER_M &&
-                           (pcie_fw & PCIE_FW_MASTER_VLD))
+                           (pcie_fw & PCIE_FW_MASTER_VLD_F))
                                master_mbox = PCIE_FW_MASTER_G(pcie_fw);
                        break;
                }
@@ -2985,7 +3251,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 
                memset(&c, 0, sizeof(c));
                INIT_CMD(c, RESET, WRITE);
-               c.val = htonl(PIORST | PIORSTMODE);
+               c.val = htonl(PIORST_F | PIORSTMODE_F);
                c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
                ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
        }
@@ -3004,8 +3270,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
         * rather than a RESET ... if it's new enough to understand that ...
         */
        if (ret == 0 || force) {
-               t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
-               t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F,
+               t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+               t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
                                 PCIE_FW_HALT_F);
        }
 
@@ -3045,7 +3311,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
                 * doing it automatically, we need to clear the PCIE_FW.HALT
                 * bit.
                 */
-               t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0);
+               t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
 
                /*
                 * If we've been given a valid mailbox, first try to get the
@@ -3055,21 +3321,21 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
                 * hitting the chip with a hammer.
                 */
                if (mbox <= PCIE_FW_MASTER_M) {
-                       t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+                       t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
                        msleep(100);
                        if (t4_fw_reset(adap, mbox,
-                                       PIORST | PIORSTMODE) == 0)
+                                       PIORST_F | PIORSTMODE_F) == 0)
                                return 0;
                }
 
-               t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
+               t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
                msleep(2000);
        } else {
                int ms;
 
-               t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+               t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
                for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
-                       if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F))
+                       if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
                                return 0;
                        msleep(100);
                        ms += 100;
@@ -3148,22 +3414,23 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
        unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
        unsigned int fl_align_log = fls(fl_align) - 1;
 
-       t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
-                    HOSTPAGESIZEPF0(sge_hps) |
-                    HOSTPAGESIZEPF1(sge_hps) |
-                    HOSTPAGESIZEPF2(sge_hps) |
-                    HOSTPAGESIZEPF3(sge_hps) |
-                    HOSTPAGESIZEPF4(sge_hps) |
-                    HOSTPAGESIZEPF5(sge_hps) |
-                    HOSTPAGESIZEPF6(sge_hps) |
-                    HOSTPAGESIZEPF7(sge_hps));
+       t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
+                    HOSTPAGESIZEPF0_V(sge_hps) |
+                    HOSTPAGESIZEPF1_V(sge_hps) |
+                    HOSTPAGESIZEPF2_V(sge_hps) |
+                    HOSTPAGESIZEPF3_V(sge_hps) |
+                    HOSTPAGESIZEPF4_V(sge_hps) |
+                    HOSTPAGESIZEPF5_V(sge_hps) |
+                    HOSTPAGESIZEPF6_V(sge_hps) |
+                    HOSTPAGESIZEPF7_V(sge_hps));
 
        if (is_t4(adap->params.chip)) {
-               t4_set_reg_field(adap, SGE_CONTROL,
-                                INGPADBOUNDARY_MASK |
-                                EGRSTATUSPAGESIZE_MASK,
-                                INGPADBOUNDARY(fl_align_log - 5) |
-                                EGRSTATUSPAGESIZE(stat_len != 64));
+               t4_set_reg_field(adap, SGE_CONTROL_A,
+                                INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+                                EGRSTATUSPAGESIZE_F,
+                                INGPADBOUNDARY_V(fl_align_log -
+                                                 INGPADBOUNDARY_SHIFT_X) |
+                                EGRSTATUSPAGESIZE_V(stat_len != 64));
        } else {
                /* T5 introduced the separation of the Free List Padding and
                 * Packing Boundaries.  Thus, we can select a smaller Padding
@@ -3193,15 +3460,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                        fl_align = 64;
                        fl_align_log = 6;
                }
-               t4_set_reg_field(adap, SGE_CONTROL,
-                                INGPADBOUNDARY_MASK |
-                                EGRSTATUSPAGESIZE_MASK,
-                                INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
-                                EGRSTATUSPAGESIZE(stat_len != 64));
+               t4_set_reg_field(adap, SGE_CONTROL_A,
+                                INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+                                EGRSTATUSPAGESIZE_F,
+                                INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
+                                EGRSTATUSPAGESIZE_V(stat_len != 64));
                t4_set_reg_field(adap, SGE_CONTROL2_A,
                                 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
                                 INGPACKBOUNDARY_V(fl_align_log -
-                                                INGPACKBOUNDARY_SHIFT_X));
+                                                  INGPACKBOUNDARY_SHIFT_X));
        }
        /*
         * Adjust various SGE Free List Host Buffer Sizes.
@@ -3224,15 +3491,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
         * Default Firmware Configuration File but we need to adjust it for
         * this host's cache line size.
         */
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
-                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
+                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
                     & ~(fl_align-1));
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
-                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
+                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
                     & ~(fl_align-1));
 
-       t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
+       t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
 
        return 0;
 }
@@ -3917,12 +4184,12 @@ int t4_wait_dev_ready(void __iomem *regs)
 {
        u32 whoami;
 
-       whoami = readl(regs + PL_WHOAMI);
+       whoami = readl(regs + PL_WHOAMI_A);
        if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
                return 0;
 
        msleep(500);
-       whoami = readl(regs + PL_WHOAMI);
+       whoami = readl(regs + PL_WHOAMI_A);
        return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
 }
 
@@ -3946,7 +4213,7 @@ static int get_flash_params(struct adapter *adap)
        ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = sf1_read(adap, 3, 0, 1, &info);
-       t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
+       t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
        if (ret)
                return ret;
 
@@ -3969,7 +4236,7 @@ static int get_flash_params(struct adapter *adap)
                return -EINVAL;
        adap->params.sf_size = 1 << info;
        adap->params.sf_fw_start =
-               t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+               t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
 
        if (adap->params.sf_size < FLASH_MIN_SIZE)
                dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
@@ -3993,7 +4260,7 @@ int t4_prep_adapter(struct adapter *adapter)
        u32 pl_rev;
 
        get_pci_mode(adapter, &adapter->params.pci);
-       pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
+       pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
 
        ret = get_flash_params(adapter);
        if (ret < 0) {
@@ -4019,6 +4286,7 @@ int t4_prep_adapter(struct adapter *adapter)
                return -EINVAL;
        }
 
+       adapter->params.cim_la_size = CIMLA_SIZE;
        init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
 
        /*
@@ -4133,7 +4401,7 @@ int t4_init_sge_params(struct adapter *adapter)
 
        /* Extract the SGE Page Size for our PF.
         */
-       hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE);
+       hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
        s_hps = (HOSTPAGESIZEPF0_S +
                 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
        sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
@@ -4142,10 +4410,10 @@ int t4_init_sge_params(struct adapter *adapter)
         */
        s_qpp = (QUEUESPERPAGEPF0_S +
                (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
-       qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF);
-       sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
-       qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF);
-       sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
+       qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
+       sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
+       qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
+       sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
 
        return 0;
 }
@@ -4161,9 +4429,9 @@ int t4_init_tp_params(struct adapter *adap)
        int chan;
        u32 v;
 
-       v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
-       adap->params.tp.tre = TIMERRESOLUTION_GET(v);
-       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+       v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
+       adap->params.tp.tre = TIMERRESOLUTION_G(v);
+       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
 
        /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
        for (chan = 0; chan < NCHAN; chan++)
@@ -4172,27 +4440,27 @@ int t4_init_tp_params(struct adapter *adap)
        /* Cache the adapter's Compressed Filter Mode and global Incress
         * Configuration.
         */
-       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+       t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                         &adap->params.tp.vlan_pri_map, 1,
-                        TP_VLAN_PRI_MAP);
-       t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+                        TP_VLAN_PRI_MAP_A);
+       t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                         &adap->params.tp.ingress_config, 1,
-                        TP_INGRESS_CONFIG);
+                        TP_INGRESS_CONFIG_A);
 
        /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
         * shift positions of several elements of the Compressed Filter Tuple
         * for this adapter which we need frequently ...
         */
-       adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
-       adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
-       adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+       adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
+       adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
+       adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
        adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
-                                                              F_PROTOCOL);
+                                                              PROTOCOL_F);
 
        /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
         * represents the presense of an Outer VLAN instead of a VNIC ID.
         */
-       if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+       if ((adap->params.tp.ingress_config & VNIC_F) == 0)
                adap->params.tp.vnic_shift = -1;
 
        return 0;
@@ -4218,35 +4486,35 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
 
        for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
                switch (filter_mode & sel) {
-               case F_FCOE:
-                       field_shift += W_FT_FCOE;
+               case FCOE_F:
+                       field_shift += FT_FCOE_W;
                        break;
-               case F_PORT:
-                       field_shift += W_FT_PORT;
+               case PORT_F:
+                       field_shift += FT_PORT_W;
                        break;
-               case F_VNIC_ID:
-                       field_shift += W_FT_VNIC_ID;
+               case VNIC_ID_F:
+                       field_shift += FT_VNIC_ID_W;
                        break;
-               case F_VLAN:
-                       field_shift += W_FT_VLAN;
+               case VLAN_F:
+                       field_shift += FT_VLAN_W;
                        break;
-               case F_TOS:
-                       field_shift += W_FT_TOS;
+               case TOS_F:
+                       field_shift += FT_TOS_W;
                        break;
-               case F_PROTOCOL:
-                       field_shift += W_FT_PROTOCOL;
+               case PROTOCOL_F:
+                       field_shift += FT_PROTOCOL_W;
                        break;
-               case F_ETHERTYPE:
-                       field_shift += W_FT_ETHERTYPE;
+               case ETHERTYPE_F:
+                       field_shift += FT_ETHERTYPE_W;
                        break;
-               case F_MACMATCH:
-                       field_shift += W_FT_MACMATCH;
+               case MACMATCH_F:
+                       field_shift += FT_MACMATCH_W;
                        break;
-               case F_MPSHITTYPE:
-                       field_shift += W_FT_MPSHITTYPE;
+               case MPSHITTYPE_F:
+                       field_shift += FT_MPSHITTYPE_W;
                        break;
-               case F_FRAGMENTATION:
-                       field_shift += W_FT_FRAGMENTATION;
+               case FRAGMENTATION_F:
+                       field_shift += FT_FRAGMENTATION_W;
                        break;
                }
        }
@@ -4311,3 +4579,289 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
        }
        return 0;
 }
+
+/**
+ *     t4_read_cimq_cfg - read CIM queue configuration
+ *     @adap: the adapter
+ *     @base: holds the queue base addresses in bytes
+ *     @size: holds the queue sizes in bytes
+ *     @thres: holds the queue full thresholds in bytes
+ *
+ *     Returns the current configuration of the CIM queues, starting with
+ *     the IBQs, then the OBQs.
+ */
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+{
+       unsigned int i, v;
+       int cim_num_obq = is_t4(adap->params.chip) ?
+                               CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+       for (i = 0; i < CIM_NUM_IBQ; i++) {
+               t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
+                            QUENUMSELECT_V(i));
+               v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+               /* value is in 256-byte units */
+               *base++ = CIMQBASE_G(v) * 256;
+               *size++ = CIMQSIZE_G(v) * 256;
+               *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
+       }
+       for (i = 0; i < cim_num_obq; i++) {
+               t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+                            QUENUMSELECT_V(i));
+               v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+               /* value is in 256-byte units */
+               *base++ = CIMQBASE_G(v) * 256;
+               *size++ = CIMQSIZE_G(v) * 256;
+       }
+}
+
+/**
+ *     t4_read_cim_ibq - read the contents of a CIM inbound queue
+ *     @adap: the adapter
+ *     @qid: the queue index
+ *     @data: where to store the queue contents
+ *     @n: capacity of @data in 32-bit words
+ *
+ *     Reads the contents of the selected CIM queue starting at address 0 up
+ *     to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
+ *     error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+       int i, err, attempts;
+       unsigned int addr;
+       const unsigned int nwords = CIM_IBQ_SIZE * 4;
+
+       if (qid > 5 || (n & 3))
+               return -EINVAL;
+
+       addr = qid * nwords;
+       if (n > nwords)
+               n = nwords;
+
+       /* It might take 3-10ms before the IBQ debug read access is allowed.
+        * Wait for 1 Sec with a delay of 1 usec.
+        */
+       attempts = 1000000;
+
+       for (i = 0; i < n; i++, addr++) {
+               t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
+                            IBQDBGEN_F);
+               err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
+                                     attempts, 1);
+               if (err)
+                       return err;
+               *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
+       }
+       t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
+       return i;
+}
+
+/**
+ *     t4_read_cim_obq - read the contents of a CIM outbound queue
+ *     @adap: the adapter
+ *     @qid: the queue index
+ *     @data: where to store the queue contents
+ *     @n: capacity of @data in 32-bit words
+ *
+ *     Reads the contents of the selected CIM queue starting at address 0 up
+ *     to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
+ *     error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+       int i, err;
+       unsigned int addr, v, nwords;
+       int cim_num_obq = is_t4(adap->params.chip) ?
+                               CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+       if ((qid > (cim_num_obq - 1)) || (n & 3))
+               return -EINVAL;
+
+       t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+                    QUENUMSELECT_V(qid));
+       v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+
+       addr = CIMQBASE_G(v) * 64;    /* muliple of 256 -> muliple of 4 */
+       nwords = CIMQSIZE_G(v) * 64;  /* same */
+       if (n > nwords)
+               n = nwords;
+
+       for (i = 0; i < n; i++, addr++) {
+               t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
+                            OBQDBGEN_F);
+               err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
+                                     2, 1);
+               if (err)
+                       return err;
+               *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
+       }
+       t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
+       return i;
+}
+
+/**
+ *     t4_cim_read - read a block from CIM internal address space
+ *     @adap: the adapter
+ *     @addr: the start address within the CIM address space
+ *     @n: number of words to read
+ *     @valp: where to store the result
+ *
+ *     Reads a block of 4-byte words from the CIM intenal address space.
+ */
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+               unsigned int *valp)
+{
+       int ret = 0;
+
+       if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+               return -EBUSY;
+
+       for ( ; !ret && n--; addr += 4) {
+               t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
+               ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+                                     0, 5, 2);
+               if (!ret)
+                       *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
+       }
+       return ret;
+}
+
+/**
+ *     t4_cim_write - write a block into CIM internal address space
+ *     @adap: the adapter
+ *     @addr: the start address within the CIM address space
+ *     @n: number of words to write
+ *     @valp: set of values to write
+ *
+ *     Writes a block of 4-byte words into the CIM intenal address space.
+ */
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+                const unsigned int *valp)
+{
+       int ret = 0;
+
+       if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+               return -EBUSY;
+
+       for ( ; !ret && n--; addr += 4) {
+               t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
+               t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
+               ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+                                     0, 5, 2);
+       }
+       return ret;
+}
+
+static int t4_cim_write1(struct adapter *adap, unsigned int addr,
+                        unsigned int val)
+{
+       return t4_cim_write(adap, addr, 1, &val);
+}
+
+/**
+ *     t4_cim_read_la - read CIM LA capture buffer
+ *     @adap: the adapter
+ *     @la_buf: where to store the LA data
+ *     @wrptr: the HW write pointer within the capture buffer
+ *
+ *     Reads the contents of the CIM LA buffer with the most recent entry at
+ *     the end of the returned data and with the entry at @wrptr first.
+ *     We try to leave the LA in the running state we find it in.
+ */
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+{
+       int i, ret;
+       unsigned int cfg, val, idx;
+
+       ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+       if (ret)
+               return ret;
+
+       if (cfg & UPDBGLAEN_F) {        /* LA is running, freeze it */
+               ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
+               if (ret)
+                       return ret;
+       }
+
+       ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+       if (ret)
+               goto restart;
+
+       idx = UPDBGLAWRPTR_G(val);
+       if (wrptr)
+               *wrptr = idx;
+
+       for (i = 0; i < adap->params.cim_la_size; i++) {
+               ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+                                   UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
+               if (ret)
+                       break;
+               ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
+               if (ret)
+                       break;
+               if (val & UPDBGLARDEN_F) {
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+               ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
+               if (ret)
+                       break;
+               idx = (idx + 1) & UPDBGLARDPTR_M;
+       }
+restart:
+       if (cfg & UPDBGLAEN_F) {
+               int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
+                                     cfg & ~UPDBGLARDEN_F);
+               if (!ret)
+                       ret = r;
+       }
+       return ret;
+}
+
+/**
+ *     t4_tp_read_la - read TP LA capture buffer
+ *     @adap: the adapter
+ *     @la_buf: where to store the LA data
+ *     @wrptr: the HW write pointer within the capture buffer
+ *
+ *     Reads the contents of the TP LA buffer with the most recent entry at
+ *     the end of the returned data and with the entry at @wrptr first.
+ *     We leave the LA in the running state we find it in.
+ */
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
+{
+       bool last_incomplete;
+       unsigned int i, cfg, val, idx;
+
+       cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
+       if (cfg & DBGLAENABLE_F)                        /* freeze LA */
+               t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
+                            adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
+
+       val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
+       idx = DBGLAWPTR_G(val);
+       last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
+       if (last_incomplete)
+               idx = (idx + 1) & DBGLARPTR_M;
+       if (wrptr)
+               *wrptr = idx;
+
+       val &= 0xffff;
+       val &= ~DBGLARPTR_V(DBGLARPTR_M);
+       val |= adap->params.tp.la_mask;
+
+       for (i = 0; i < TPLA_SIZE; i++) {
+               t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
+               la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
+               idx = (idx + 1) & DBGLARPTR_M;
+       }
+
+       /* Wipe out last entry if it isn't valid */
+       if (last_incomplete)
+               la_buf[TPLA_SIZE - 1] = ~0ULL;
+
+       if (cfg & DBGLAENABLE_F)                    /* restore running state */
+               t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
+                            cfg | adap->params.tp.la_mask);
+}
index c19a90e7f7d1934ee6011d5601bfb0d46982710e..380b15c0417a142c2d3dd9af6d5bc5767927aaa6 100644 (file)
@@ -48,6 +48,7 @@ enum {
        NMTUS          = 16,    /* size of MTU table */
        NCCTRL_WIN     = 32,    /* # of congestion control windows */
        L2T_SIZE       = 4096,  /* # of L2T entries */
+       PM_NSTATS      = 5,     /* # of PM stats */
        MBOX_LEN       = 64,    /* mailbox size in bytes */
        TRACE_LEN      = 112,   /* length of trace data and mask */
        FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
@@ -55,6 +56,17 @@ enum {
        WOL_PAT_LEN    = 128,   /* length of WoL patterns */
 };
 
+enum {
+       CIM_NUM_IBQ    = 6,     /* # of CIM IBQs */
+       CIM_NUM_OBQ    = 6,     /* # of CIM OBQs */
+       CIM_NUM_OBQ_T5 = 8,     /* # of CIM OBQs for T5 adapter */
+       CIMLA_SIZE     = 2048,  /* # of 32-bit words in CIM LA */
+       CIM_IBQ_SIZE   = 128,   /* # of 128-bit words in a CIM IBQ */
+       CIM_OBQ_SIZE   = 128,   /* # of 128-bit words in a CIM OBQ */
+       TPLA_SIZE      = 128,   /* # of 64-bit words in TP LA */
+       ULPRX_LA_SIZE  = 512,   /* # of 256-bit words in ULP_RX LA */
+};
+
 enum {
        SF_PAGE_SIZE = 256,           /* serial flash page size */
        SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
@@ -110,6 +122,18 @@ enum {
        SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
 };
 
+/* PCI-e memory window access */
+enum pcie_memwin {
+       MEMWIN_NIC      = 0,
+       MEMWIN_RSVD1    = 1,
+       MEMWIN_RSVD2    = 2,
+       MEMWIN_RDMA     = 3,
+       MEMWIN_RSVD4    = 4,
+       MEMWIN_FOISCSI  = 5,
+       MEMWIN_CSIOSTOR = 6,
+       MEMWIN_RSVD7    = 7,
+};
+
 struct sge_qstat {                /* data written to SGE queue status entries */
        __be32 qid;
        __be16 cidx;
index 0f89f68948aba2e6f42f87188827ce7eac7bcf7e..0fb975e258b35b08af3f43a8dbadbd1752d7dea5 100644 (file)
@@ -123,6 +123,13 @@ enum CPL_error {
        CPL_ERR_IWARP_FLM          = 50,
 };
 
+enum {
+       CPL_CONN_POLICY_AUTO = 0,
+       CPL_CONN_POLICY_ASK  = 1,
+       CPL_CONN_POLICY_FILTER = 2,
+       CPL_CONN_POLICY_DENY = 3
+};
+
 enum {
        ULP_MODE_NONE          = 0,
        ULP_MODE_ISCSI         = 2,
@@ -160,16 +167,28 @@ union opcode_tid {
        u8 opcode;
 };
 
-#define CPL_OPCODE(x) ((x) << 24)
-#define G_CPL_OPCODE(x) (((x) >> 24) & 0xFF)
-#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
+#define CPL_OPCODE_S    24
+#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S)
+#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF)
+#define TID_G(x)    ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid))
+
 #define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
-#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (TID_G(be32_to_cpu(OPCODE_TID(cmd))))
 
 /* partitioning of TID fields that also carry a queue id */
-#define GET_TID_TID(x) ((x) & 0x3fff)
-#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
-#define TID_QID(x)     ((x) << 14)
+#define TID_TID_S    0
+#define TID_TID_M    0x3fff
+#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)
+
+#define TID_QID_S    14
+#define TID_QID_M    0x3ff
+#define TID_QID_V(x) ((x) << TID_QID_S)
+#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M)
 
 struct rss_header {
        u8 opcode;
@@ -199,8 +218,8 @@ struct work_request_hdr {
 };
 
 /* wr_hi fields */
-#define S_WR_OP    24
-#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+#define WR_OP_S    24
+#define WR_OP_V(x) ((__u64)(x) << WR_OP_S)
 
 #define WR_HDR struct work_request_hdr wr
 
@@ -270,17 +289,42 @@ struct cpl_pass_open_req {
        __be32 local_ip;
        __be32 peer_ip;
        __be64 opt0;
-#define NO_CONG(x)    ((x) << 4)
-#define DELACK(x)     ((x) << 5)
-#define DSCP(x)       ((x) << 22)
-#define TCAM_BYPASS(x) ((u64)(x) << 48)
-#define NAGLE(x)      ((u64)(x) << 49)
        __be64 opt1;
-#define SYN_RSS_ENABLE   (1 << 0)
-#define SYN_RSS_QUEUE(x) ((x) << 2)
-#define CONN_POLICY_ASK  (1 << 22)
 };
 
+/* option 0 fields */
+#define NO_CONG_S    4
+#define NO_CONG_V(x) ((x) << NO_CONG_S)
+#define NO_CONG_F    NO_CONG_V(1U)
+
+#define DELACK_S    5
+#define DELACK_V(x) ((x) << DELACK_S)
+#define DELACK_F    DELACK_V(1U)
+
+#define DSCP_S    22
+#define DSCP_M    0x3F
+#define DSCP_V(x) ((x) << DSCP_S)
+#define DSCP_G(x) (((x) >> DSCP_S) & DSCP_M)
+
+#define TCAM_BYPASS_S    48
+#define TCAM_BYPASS_V(x) ((__u64)(x) << TCAM_BYPASS_S)
+#define TCAM_BYPASS_F    TCAM_BYPASS_V(1ULL)
+
+#define NAGLE_S    49
+#define NAGLE_V(x) ((__u64)(x) << NAGLE_S)
+#define NAGLE_F    NAGLE_V(1ULL)
+
+/* option 1 fields */
+#define SYN_RSS_ENABLE_S    0
+#define SYN_RSS_ENABLE_V(x) ((x) << SYN_RSS_ENABLE_S)
+#define SYN_RSS_ENABLE_F    SYN_RSS_ENABLE_V(1U)
+
+#define SYN_RSS_QUEUE_S    2
+#define SYN_RSS_QUEUE_V(x) ((x) << SYN_RSS_QUEUE_S)
+
+#define CONN_POLICY_S    22
+#define CONN_POLICY_V(x) ((x) << CONN_POLICY_S)
+
 struct cpl_pass_open_req6 {
        WR_HDR;
        union opcode_tid ot;
@@ -304,16 +348,37 @@ struct cpl_pass_accept_rpl {
        WR_HDR;
        union opcode_tid ot;
        __be32 opt2;
-#define RX_COALESCE_VALID(x) ((x) << 11)
-#define RX_COALESCE(x)       ((x) << 12)
-#define PACE(x)              ((x) << 16)
-#define TX_QUEUE(x)          ((x) << 23)
-#define CCTRL_ECN(x)         ((x) << 27)
-#define TSTAMPS_EN(x)        ((x) << 29)
-#define SACK_EN(x)           ((x) << 30)
        __be64 opt0;
 };
 
+/* option 2 fields */
+#define RX_COALESCE_VALID_S    11
+#define RX_COALESCE_VALID_V(x) ((x) << RX_COALESCE_VALID_S)
+#define RX_COALESCE_VALID_F    RX_COALESCE_VALID_V(1U)
+
+#define RX_COALESCE_S    12
+#define RX_COALESCE_V(x) ((x) << RX_COALESCE_S)
+
+#define PACE_S    16
+#define PACE_V(x) ((x) << PACE_S)
+
+#define TX_QUEUE_S    23
+#define TX_QUEUE_M    0x7
+#define TX_QUEUE_V(x) ((x) << TX_QUEUE_S)
+#define TX_QUEUE_G(x) (((x) >> TX_QUEUE_S) & TX_QUEUE_M)
+
+#define CCTRL_ECN_S    27
+#define CCTRL_ECN_V(x) ((x) << CCTRL_ECN_S)
+#define CCTRL_ECN_F    CCTRL_ECN_V(1U)
+
+#define TSTAMPS_EN_S    29
+#define TSTAMPS_EN_V(x) ((x) << TSTAMPS_EN_S)
+#define TSTAMPS_EN_F    TSTAMPS_EN_V(1U)
+
+#define SACK_EN_S    30
+#define SACK_EN_V(x) ((x) << SACK_EN_S)
+#define SACK_EN_F    SACK_EN_V(1U)
+
 struct cpl_t5_pass_accept_rpl {
        WR_HDR;
        union opcode_tid ot;
@@ -384,30 +449,61 @@ struct cpl_t5_act_open_req6 {
 struct cpl_act_open_rpl {
        union opcode_tid ot;
        __be32 atid_status;
-#define GET_AOPEN_STATUS(x) ((x) & 0xff)
-#define GET_AOPEN_ATID(x)   (((x) >> 8) & 0xffffff)
 };
 
+/* cpl_act_open_rpl.atid_status fields */
+#define AOPEN_STATUS_S    0
+#define AOPEN_STATUS_M    0xFF
+#define AOPEN_STATUS_G(x) (((x) >> AOPEN_STATUS_S) & AOPEN_STATUS_M)
+
+#define AOPEN_ATID_S    8
+#define AOPEN_ATID_M    0xFFFFFF
+#define AOPEN_ATID_G(x) (((x) >> AOPEN_ATID_S) & AOPEN_ATID_M)
+
 struct cpl_pass_establish {
        union opcode_tid ot;
        __be32 rsvd;
        __be32 tos_stid;
-#define PASS_OPEN_TID(x) ((x) << 0)
-#define PASS_OPEN_TOS(x) ((x) << 24)
-#define GET_PASS_OPEN_TID(x)   (((x) >> 0) & 0xFFFFFF)
-#define GET_POPEN_TID(x) ((x) & 0xffffff)
-#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
        __be16 mac_idx;
        __be16 tcp_opt;
-#define GET_TCPOPT_WSCALE_OK(x)  (((x) >> 5) & 1)
-#define GET_TCPOPT_SACK(x)       (((x) >> 6) & 1)
-#define GET_TCPOPT_TSTAMP(x)     (((x) >> 7) & 1)
-#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
-#define GET_TCPOPT_MSS(x)        (((x) >> 12) & 0xf)
        __be32 snd_isn;
        __be32 rcv_isn;
 };
 
+/* cpl_pass_establish.tos_stid fields */
+#define PASS_OPEN_TID_S    0
+#define PASS_OPEN_TID_M    0xFFFFFF
+#define PASS_OPEN_TID_V(x) ((x) << PASS_OPEN_TID_S)
+#define PASS_OPEN_TID_G(x) (((x) >> PASS_OPEN_TID_S) & PASS_OPEN_TID_M)
+
+#define PASS_OPEN_TOS_S    24
+#define PASS_OPEN_TOS_M    0xFF
+#define PASS_OPEN_TOS_V(x) ((x) << PASS_OPEN_TOS_S)
+#define PASS_OPEN_TOS_G(x) (((x) >> PASS_OPEN_TOS_S) & PASS_OPEN_TOS_M)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
+#define TCPOPT_WSCALE_OK_S     5
+#define TCPOPT_WSCALE_OK_M     0x1
+#define TCPOPT_WSCALE_OK_G(x)  \
+       (((x) >> TCPOPT_WSCALE_OK_S) & TCPOPT_WSCALE_OK_M)
+
+#define TCPOPT_SACK_S          6
+#define TCPOPT_SACK_M          0x1
+#define TCPOPT_SACK_G(x)       (((x) >> TCPOPT_SACK_S) & TCPOPT_SACK_M)
+
+#define TCPOPT_TSTAMP_S                7
+#define TCPOPT_TSTAMP_M                0x1
+#define TCPOPT_TSTAMP_G(x)     (((x) >> TCPOPT_TSTAMP_S) & TCPOPT_TSTAMP_M)
+
+#define TCPOPT_SND_WSCALE_S    8
+#define TCPOPT_SND_WSCALE_M    0xF
+#define TCPOPT_SND_WSCALE_G(x) \
+       (((x) >> TCPOPT_SND_WSCALE_S) & TCPOPT_SND_WSCALE_M)
+
+#define TCPOPT_MSS_S   12
+#define TCPOPT_MSS_M   0xF
+#define TCPOPT_MSS_G(x)        (((x) >> TCPOPT_MSS_S) & TCPOPT_MSS_M)
+
 struct cpl_act_establish {
        union opcode_tid ot;
        __be32 rsvd;
@@ -422,24 +518,39 @@ struct cpl_get_tcb {
        WR_HDR;
        union opcode_tid ot;
        __be16 reply_ctrl;
-#define QUEUENO(x)    ((x) << 0)
-#define REPLY_CHAN(x) ((x) << 14)
-#define NO_REPLY(x)   ((x) << 15)
        __be16 cookie;
 };
 
+/* cpl_get_tcb.reply_ctrl fields */
+#define QUEUENO_S    0
+#define QUEUENO_V(x) ((x) << QUEUENO_S)
+
+#define REPLY_CHAN_S    14
+#define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S)
+#define REPLY_CHAN_F    REPLY_CHAN_V(1U)
+
+#define NO_REPLY_S    15
+#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
+#define NO_REPLY_F    NO_REPLY_V(1U)
+
 struct cpl_set_tcb_field {
        WR_HDR;
        union opcode_tid ot;
        __be16 reply_ctrl;
        __be16 word_cookie;
-#define TCB_WORD(x)   ((x) << 0)
-#define TCB_COOKIE(x) ((x) << 5)
-#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
        __be64 mask;
        __be64 val;
 };
 
+/* cpl_set_tcb_field.word_cookie fields */
+#define TCB_WORD_S    0
+#define TCB_WORD(x)   ((x) << TCB_WORD_S)
+
+#define TCB_COOKIE_S    5
+#define TCB_COOKIE_M    0x7
+#define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S)
+#define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M)
+
 struct cpl_set_tcb_rpl {
        union opcode_tid ot;
        __be16 rsvd;
@@ -466,10 +577,14 @@ struct cpl_close_listsvr_req {
        WR_HDR;
        union opcode_tid ot;
        __be16 reply_ctrl;
-#define LISTSVR_IPV6(x) ((x) << 14)
        __be16 rsvd;
 };
 
+/* additional cpl_close_listsvr_req.reply_ctrl field */
+#define LISTSVR_IPV6_S    14
+#define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S)
+#define LISTSVR_IPV6_F    LISTSVR_IPV6_V(1U)
+
 struct cpl_close_listsvr_rpl {
        union opcode_tid ot;
        u8 rsvd[3];
@@ -565,6 +680,34 @@ struct cpl_tx_pkt_lso_core {
        /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
 };
 
+/* cpl_tx_pkt_lso_core.lso_ctrl fields */
+#define LSO_TCPHDR_LEN_S    0
+#define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S)
+
+#define LSO_IPHDR_LEN_S    4
+#define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S)
+
+#define LSO_ETHHDR_LEN_S    16
+#define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S)
+
+#define LSO_IPV6_S    20
+#define LSO_IPV6_V(x) ((x) << LSO_IPV6_S)
+#define LSO_IPV6_F    LSO_IPV6_V(1U)
+
+#define LSO_LAST_SLICE_S    22
+#define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S)
+#define LSO_LAST_SLICE_F    LSO_LAST_SLICE_V(1U)
+
+#define LSO_FIRST_SLICE_S    23
+#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S)
+#define LSO_FIRST_SLICE_F    LSO_FIRST_SLICE_V(1U)
+
+#define LSO_OPCODE_S    24
+#define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S)
+
+#define LSO_T5_XFER_SIZE_S        0
+#define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S)
+
 struct cpl_tx_pkt_lso {
        WR_HDR;
        struct cpl_tx_pkt_lso_core c;
@@ -574,8 +717,6 @@ struct cpl_tx_pkt_lso {
 struct cpl_iscsi_hdr {
        union opcode_tid ot;
        __be16 pdu_len_ddp;
-#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
-#define ISCSI_DDP        (1 << 15)
        __be16 len;
        __be32 seq;
        __be16 urg;
@@ -583,6 +724,16 @@ struct cpl_iscsi_hdr {
        u8 status;
 };
 
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define ISCSI_PDU_LEN_S    0
+#define ISCSI_PDU_LEN_M    0x7FFF
+#define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S)
+#define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M)
+
+#define ISCSI_DDP_S    15
+#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
+#define ISCSI_DDP_F    ISCSI_DDP_V(1U)
+
 struct cpl_rx_data {
        union opcode_tid ot;
        __be16 rsvd;
@@ -639,49 +790,61 @@ struct cpl_rx_pkt {
        __be16 vlan;
        __be16 len;
        __be32 l2info;
-#define RXF_UDP (1 << 22)
-#define RXF_TCP (1 << 23)
-#define RXF_IP  (1 << 24)
-#define RXF_IP6 (1 << 25)
        __be16 hdr_len;
        __be16 err_vec;
 };
 
+#define RXF_UDP_S    22
+#define RXF_UDP_V(x) ((x) << RXF_UDP_S)
+#define RXF_UDP_F    RXF_UDP_V(1U)
+
+#define RXF_TCP_S    23
+#define RXF_TCP_V(x) ((x) << RXF_TCP_S)
+#define RXF_TCP_F    RXF_TCP_V(1U)
+
+#define RXF_IP_S    24
+#define RXF_IP_V(x) ((x) << RXF_IP_S)
+#define RXF_IP_F    RXF_IP_V(1U)
+
+#define RXF_IP6_S    25
+#define RXF_IP6_V(x) ((x) << RXF_IP6_S)
+#define RXF_IP6_F    RXF_IP6_V(1U)
+
 /* rx_pkt.l2info fields */
-#define S_RX_ETHHDR_LEN    0
-#define M_RX_ETHHDR_LEN    0x1F
-#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
-#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
-
-#define S_RX_T5_ETHHDR_LEN    0
-#define M_RX_T5_ETHHDR_LEN    0x3F
-#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
-#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
-
-#define S_RX_MACIDX    8
-#define M_RX_MACIDX    0x1FF
-#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
-#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
-
-#define S_RXF_SYN    21
-#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
-#define F_RXF_SYN    V_RXF_SYN(1U)
-
-#define S_RX_CHAN    28
-#define M_RX_CHAN    0xF
-#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
-#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+#define RX_ETHHDR_LEN_S    0
+#define RX_ETHHDR_LEN_M    0x1F
+#define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S)
+#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M)
+
+#define RX_T5_ETHHDR_LEN_S    0
+#define RX_T5_ETHHDR_LEN_M    0x3F
+#define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S)
+#define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M)
+
+#define RX_MACIDX_S    8
+#define RX_MACIDX_M    0x1FF
+#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S)
+#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M)
+
+#define RXF_SYN_S    21
+#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
+#define RXF_SYN_F    RXF_SYN_V(1U)
+
+#define RX_CHAN_S    28
+#define RX_CHAN_M    0xF
+#define RX_CHAN_V(x) ((x) << RX_CHAN_S)
+#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M)
 
 /* rx_pkt.hdr_len fields */
-#define S_RX_TCPHDR_LEN    0
-#define M_RX_TCPHDR_LEN    0x3F
-#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
-#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+#define RX_TCPHDR_LEN_S    0
+#define RX_TCPHDR_LEN_M    0x3F
+#define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S)
+#define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M)
 
-#define S_RX_IPHDR_LEN    6
-#define M_RX_IPHDR_LEN    0x3FF
-#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
-#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+#define RX_IPHDR_LEN_S    6
+#define RX_IPHDR_LEN_M    0x3FF
+#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
+#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
 
 struct cpl_trace_pkt {
        u8 opcode;
@@ -730,14 +893,22 @@ struct cpl_l2t_write_req {
        WR_HDR;
        union opcode_tid ot;
        __be16 params;
-#define L2T_W_INFO(x)    ((x) << 2)
-#define L2T_W_PORT(x)    ((x) << 8)
-#define L2T_W_NOREPLY(x) ((x) << 15)
        __be16 l2t_idx;
        __be16 vlan;
        u8 dst_mac[6];
 };
 
+/* cpl_l2t_write_req.params fields */
+#define L2T_W_INFO_S    2
+#define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S)
+
+#define L2T_W_PORT_S    8
+#define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S)
+
+#define L2T_W_NOREPLY_S    15
+#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
+#define L2T_W_NOREPLY_F    L2T_W_NOREPLY_V(1U)
+
 struct cpl_l2t_write_rpl {
        union opcode_tid ot;
        u8 status;
@@ -752,11 +923,15 @@ struct cpl_rdma_terminate {
 
 struct cpl_sge_egr_update {
        __be32 opcode_qid;
-#define EGR_QID(x) ((x) & 0x1FFFF)
        __be16 cidx;
        __be16 pidx;
 };
 
+/* cpl_sge_egr_update.ot fields */
+#define EGR_QID_S    0
+#define EGR_QID_M    0x1FFFF
+#define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M)
+
 /* cpl_fw*.type values */
 enum {
        FW_TYPE_CMD_RPL = 0,
@@ -849,22 +1024,30 @@ struct ulptx_sge_pair {
 
 struct ulptx_sgl {
        __be32 cmd_nsge;
-#define ULPTX_NSGE(x) ((x) << 0)
-#define ULPTX_MORE (1U << 23)
        __be32 len0;
        __be64 addr0;
        struct ulptx_sge_pair sge[0];
 };
 
+#define ULPTX_NSGE_S    0
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
+
+#define ULPTX_MORE_S   23
+#define ULPTX_MORE_V(x)        ((x) << ULPTX_MORE_S)
+#define ULPTX_MORE_F   ULPTX_MORE_V(1U)
+
 struct ulp_mem_io {
        WR_HDR;
        __be32 cmd;
        __be32 len16;             /* command length */
        __be32 dlen;              /* data length in 32-byte units */
        __be32 lock_addr;
-#define ULP_MEMIO_LOCK(x) ((x) << 31)
 };
 
+#define ULP_MEMIO_LOCK_S    31
+#define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S)
+#define ULP_MEMIO_LOCK_F    ULP_MEMIO_LOCK_V(1U)
+
 /* additional ulp_mem_io.cmd fields */
 #define ULP_MEMIO_ORDER_S    23
 #define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
@@ -874,13 +1057,9 @@ struct ulp_mem_io {
 #define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
 #define T5_ULP_MEMIO_IMM_F    T5_ULP_MEMIO_IMM_V(1U)
 
-#define S_T5_ULP_MEMIO_IMM    23
-#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
-#define F_T5_ULP_MEMIO_IMM    V_T5_ULP_MEMIO_IMM(1U)
-
-#define S_T5_ULP_MEMIO_ORDER    22
-#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
-#define F_T5_ULP_MEMIO_ORDER    V_T5_ULP_MEMIO_ORDER(1U)
+#define T5_ULP_MEMIO_ORDER_S    22
+#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
+#define T5_ULP_MEMIO_ORDER_F    T5_ULP_MEMIO_ORDER_V(1U)
 
 /* ulp_mem_io.lock_addr fields */
 #define ULP_MEMIO_ADDR_S    0
index 9e4f95a91fb4004af5d63569a316b0beb2deca93..ddfb5b846045d0e156c4287b2edb50c6d250b40f 100644 (file)
@@ -153,6 +153,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
index d7bd34ee65bdbcab1acdbdb5826f5248063e2a9f..231a725f6d5d1679c4d6b07ff6694ac387f0b66a 100644 (file)
 #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 
-#define SGE_PF_KDOORBELL 0x0
-#define  QID_MASK    0xffff8000U
-#define  QID_SHIFT   15
-#define  QID(x)      ((x) << QID_SHIFT)
-#define  DBPRIO(x)   ((x) << 14)
-#define  DBTYPE(x)   ((x) << 13)
-#define  PIDX_MASK   0x00003fffU
-#define  PIDX_SHIFT  0
-#define  PIDX(x)     ((x) << PIDX_SHIFT)
-#define  PIDX_SHIFT_T5   0
-#define  PIDX_T5(x)  ((x) << PIDX_SHIFT_T5)
-
-
-#define SGE_TIMERREGS  6
-#define SGE_PF_GTS 0x4
-#define  INGRESSQID_MASK   0xffff0000U
-#define  INGRESSQID_SHIFT  16
-#define  INGRESSQID(x)     ((x) << INGRESSQID_SHIFT)
-#define  TIMERREG_MASK     0x0000e000U
-#define  TIMERREG_SHIFT    13
-#define  TIMERREG(x)       ((x) << TIMERREG_SHIFT)
-#define  SEINTARM_MASK     0x00001000U
-#define  SEINTARM_SHIFT    12
-#define  SEINTARM(x)       ((x) << SEINTARM_SHIFT)
-#define  CIDXINC_MASK      0x00000fffU
-#define  CIDXINC_SHIFT     0
-#define  CIDXINC(x)        ((x) << CIDXINC_SHIFT)
-
-#define X_RXPKTCPLMODE_SPLIT     1
-#define X_INGPADBOUNDARY_SHIFT 5
-
-#define SGE_CONTROL 0x1008
-#define SGE_CONTROL2_A         0x1124
-#define  DCASYSTYPE             0x00080000U
-#define  RXPKTCPLMODE_MASK      0x00040000U
-#define  RXPKTCPLMODE_SHIFT     18
-#define  RXPKTCPLMODE(x)        ((x) << RXPKTCPLMODE_SHIFT)
-#define  EGRSTATUSPAGESIZE_MASK  0x00020000U
-#define  EGRSTATUSPAGESIZE_SHIFT 17
-#define  EGRSTATUSPAGESIZE(x)    ((x) << EGRSTATUSPAGESIZE_SHIFT)
-#define  PKTSHIFT_MASK          0x00001c00U
-#define  PKTSHIFT_SHIFT         10
-#define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
-#define  PKTSHIFT_GET(x)       (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
-#define  INGPCIEBOUNDARY_32B_X 0
-#define  INGPCIEBOUNDARY_MASK   0x00000380U
-#define  INGPCIEBOUNDARY_SHIFT  7
-#define  INGPCIEBOUNDARY(x)     ((x) << INGPCIEBOUNDARY_SHIFT)
-#define  INGPADBOUNDARY_MASK    0x00000070U
-#define  INGPADBOUNDARY_SHIFT   4
-#define  INGPADBOUNDARY(x)      ((x) << INGPADBOUNDARY_SHIFT)
-#define  INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
-                                >> INGPADBOUNDARY_SHIFT)
-#define  INGPACKBOUNDARY_16B_X 0
-#define  INGPACKBOUNDARY_SHIFT_X 5
+#define SGE_PF_KDOORBELL_A 0x0
+
+#define QID_S    15
+#define QID_V(x) ((x) << QID_S)
+
+#define DBPRIO_S    14
+#define DBPRIO_V(x) ((x) << DBPRIO_S)
+#define DBPRIO_F    DBPRIO_V(1U)
+
+#define PIDX_S    0
+#define PIDX_V(x) ((x) << PIDX_S)
+
+#define SGE_VF_KDOORBELL_A 0x0
+
+#define DBTYPE_S    13
+#define DBTYPE_V(x) ((x) << DBTYPE_S)
+#define DBTYPE_F    DBTYPE_V(1U)
+
+#define PIDX_T5_S    0
+#define PIDX_T5_M    0x1fffU
+#define PIDX_T5_V(x) ((x) << PIDX_T5_S)
+#define PIDX_T5_G(x) (((x) >> PIDX_T5_S) & PIDX_T5_M)
+
+#define SGE_PF_GTS_A 0x4
+
+#define INGRESSQID_S    16
+#define INGRESSQID_V(x) ((x) << INGRESSQID_S)
+
+#define TIMERREG_S    13
+#define TIMERREG_V(x) ((x) << TIMERREG_S)
+
+#define SEINTARM_S    12
+#define SEINTARM_V(x) ((x) << SEINTARM_S)
+
+#define CIDXINC_S    0
+#define CIDXINC_M    0xfffU
+#define CIDXINC_V(x) ((x) << CIDXINC_S)
+
+#define SGE_CONTROL_A  0x1008
+#define SGE_CONTROL2_A 0x1124
+
+#define RXPKTCPLMODE_S    18
+#define RXPKTCPLMODE_V(x) ((x) << RXPKTCPLMODE_S)
+#define RXPKTCPLMODE_F    RXPKTCPLMODE_V(1U)
+
+#define EGRSTATUSPAGESIZE_S    17
+#define EGRSTATUSPAGESIZE_V(x) ((x) << EGRSTATUSPAGESIZE_S)
+#define EGRSTATUSPAGESIZE_F    EGRSTATUSPAGESIZE_V(1U)
+
+#define PKTSHIFT_S    10
+#define PKTSHIFT_M    0x7U
+#define PKTSHIFT_V(x) ((x) << PKTSHIFT_S)
+#define PKTSHIFT_G(x) (((x) >> PKTSHIFT_S) & PKTSHIFT_M)
+
+#define INGPCIEBOUNDARY_S    7
+#define INGPCIEBOUNDARY_V(x) ((x) << INGPCIEBOUNDARY_S)
+
+#define INGPADBOUNDARY_S    4
+#define INGPADBOUNDARY_M    0x7U
+#define INGPADBOUNDARY_V(x) ((x) << INGPADBOUNDARY_S)
+#define INGPADBOUNDARY_G(x) (((x) >> INGPADBOUNDARY_S) & INGPADBOUNDARY_M)
+
+#define EGRPCIEBOUNDARY_S    1
+#define EGRPCIEBOUNDARY_V(x) ((x) << EGRPCIEBOUNDARY_S)
 
 #define  INGPACKBOUNDARY_S     16
 #define  INGPACKBOUNDARY_M     0x7U
 #define  INGPACKBOUNDARY_V(x)  ((x) << INGPACKBOUNDARY_S)
 #define  INGPACKBOUNDARY_G(x)  (((x) >> INGPACKBOUNDARY_S) \
                                 & INGPACKBOUNDARY_M)
-#define  EGRPCIEBOUNDARY_MASK   0x0000000eU
-#define  EGRPCIEBOUNDARY_SHIFT  1
-#define  EGRPCIEBOUNDARY(x)     ((x) << EGRPCIEBOUNDARY_SHIFT)
-#define  GLOBALENABLE           0x00000001U
 
-#define SGE_HOST_PAGE_SIZE 0x100c
+#define GLOBALENABLE_S    0
+#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
+#define GLOBALENABLE_F    GLOBALENABLE_V(1U)
+
+#define SGE_HOST_PAGE_SIZE_A 0x100c
+
+#define HOSTPAGESIZEPF7_S    28
+#define HOSTPAGESIZEPF7_M    0xfU
+#define HOSTPAGESIZEPF7_V(x) ((x) << HOSTPAGESIZEPF7_S)
+#define HOSTPAGESIZEPF7_G(x) (((x) >> HOSTPAGESIZEPF7_S) & HOSTPAGESIZEPF7_M)
+
+#define HOSTPAGESIZEPF6_S    24
+#define HOSTPAGESIZEPF6_M    0xfU
+#define HOSTPAGESIZEPF6_V(x) ((x) << HOSTPAGESIZEPF6_S)
+#define HOSTPAGESIZEPF6_G(x) (((x) >> HOSTPAGESIZEPF6_S) & HOSTPAGESIZEPF6_M)
+
+#define HOSTPAGESIZEPF5_S    20
+#define HOSTPAGESIZEPF5_M    0xfU
+#define HOSTPAGESIZEPF5_V(x) ((x) << HOSTPAGESIZEPF5_S)
+#define HOSTPAGESIZEPF5_G(x) (((x) >> HOSTPAGESIZEPF5_S) & HOSTPAGESIZEPF5_M)
+
+#define HOSTPAGESIZEPF4_S    16
+#define HOSTPAGESIZEPF4_M    0xfU
+#define HOSTPAGESIZEPF4_V(x) ((x) << HOSTPAGESIZEPF4_S)
+#define HOSTPAGESIZEPF4_G(x) (((x) >> HOSTPAGESIZEPF4_S) & HOSTPAGESIZEPF4_M)
+
+#define HOSTPAGESIZEPF3_S    12
+#define HOSTPAGESIZEPF3_M    0xfU
+#define HOSTPAGESIZEPF3_V(x) ((x) << HOSTPAGESIZEPF3_S)
+#define HOSTPAGESIZEPF3_G(x) (((x) >> HOSTPAGESIZEPF3_S) & HOSTPAGESIZEPF3_M)
+
+#define HOSTPAGESIZEPF2_S    8
+#define HOSTPAGESIZEPF2_M    0xfU
+#define HOSTPAGESIZEPF2_V(x) ((x) << HOSTPAGESIZEPF2_S)
+#define HOSTPAGESIZEPF2_G(x) (((x) >> HOSTPAGESIZEPF2_S) & HOSTPAGESIZEPF2_M)
+
+#define HOSTPAGESIZEPF1_S    4
+#define HOSTPAGESIZEPF1_M    0xfU
+#define HOSTPAGESIZEPF1_V(x) ((x) << HOSTPAGESIZEPF1_S)
+#define HOSTPAGESIZEPF1_G(x) (((x) >> HOSTPAGESIZEPF1_S) & HOSTPAGESIZEPF1_M)
+
+#define HOSTPAGESIZEPF0_S    0
+#define HOSTPAGESIZEPF0_M    0xfU
+#define HOSTPAGESIZEPF0_V(x) ((x) << HOSTPAGESIZEPF0_S)
+#define HOSTPAGESIZEPF0_G(x) (((x) >> HOSTPAGESIZEPF0_S) & HOSTPAGESIZEPF0_M)
+
+#define SGE_EGRESS_QUEUES_PER_PAGE_PF_A 0x1010
+#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
 
-#define  HOSTPAGESIZEPF7_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF7_SHIFT  28
-#define  HOSTPAGESIZEPF7(x)     ((x) << HOSTPAGESIZEPF7_SHIFT)
+#define QUEUESPERPAGEPF1_S    4
 
-#define  HOSTPAGESIZEPF6_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF6_SHIFT  24
-#define  HOSTPAGESIZEPF6(x)     ((x) << HOSTPAGESIZEPF6_SHIFT)
+#define QUEUESPERPAGEPF0_S    0
+#define QUEUESPERPAGEPF0_M    0xfU
+#define QUEUESPERPAGEPF0_V(x) ((x) << QUEUESPERPAGEPF0_S)
+#define QUEUESPERPAGEPF0_G(x) (((x) >> QUEUESPERPAGEPF0_S) & QUEUESPERPAGEPF0_M)
 
-#define  HOSTPAGESIZEPF5_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF5_SHIFT  20
-#define  HOSTPAGESIZEPF5(x)     ((x) << HOSTPAGESIZEPF5_SHIFT)
+#define SGE_INT_CAUSE1_A       0x1024
+#define SGE_INT_CAUSE2_A       0x1030
+#define SGE_INT_CAUSE3_A       0x103c
+
+#define ERR_FLM_DBP_S    31
+#define ERR_FLM_DBP_V(x) ((x) << ERR_FLM_DBP_S)
+#define ERR_FLM_DBP_F    ERR_FLM_DBP_V(1U)
+
+#define ERR_FLM_IDMA1_S    30
+#define ERR_FLM_IDMA1_V(x) ((x) << ERR_FLM_IDMA1_S)
+#define ERR_FLM_IDMA1_F    ERR_FLM_IDMA1_V(1U)
+
+#define ERR_FLM_IDMA0_S    29
+#define ERR_FLM_IDMA0_V(x) ((x) << ERR_FLM_IDMA0_S)
+#define ERR_FLM_IDMA0_F    ERR_FLM_IDMA0_V(1U)
+
+#define ERR_FLM_HINT_S    28
+#define ERR_FLM_HINT_V(x) ((x) << ERR_FLM_HINT_S)
+#define ERR_FLM_HINT_F    ERR_FLM_HINT_V(1U)
+
+#define ERR_PCIE_ERROR3_S    27
+#define ERR_PCIE_ERROR3_V(x) ((x) << ERR_PCIE_ERROR3_S)
+#define ERR_PCIE_ERROR3_F    ERR_PCIE_ERROR3_V(1U)
+
+#define ERR_PCIE_ERROR2_S    26
+#define ERR_PCIE_ERROR2_V(x) ((x) << ERR_PCIE_ERROR2_S)
+#define ERR_PCIE_ERROR2_F    ERR_PCIE_ERROR2_V(1U)
+
+#define ERR_PCIE_ERROR1_S    25
+#define ERR_PCIE_ERROR1_V(x) ((x) << ERR_PCIE_ERROR1_S)
+#define ERR_PCIE_ERROR1_F    ERR_PCIE_ERROR1_V(1U)
+
+#define ERR_PCIE_ERROR0_S    24
+#define ERR_PCIE_ERROR0_V(x) ((x) << ERR_PCIE_ERROR0_S)
+#define ERR_PCIE_ERROR0_F    ERR_PCIE_ERROR0_V(1U)
+
+#define ERR_CPL_EXCEED_IQE_SIZE_S    22
+#define ERR_CPL_EXCEED_IQE_SIZE_V(x) ((x) << ERR_CPL_EXCEED_IQE_SIZE_S)
+#define ERR_CPL_EXCEED_IQE_SIZE_F    ERR_CPL_EXCEED_IQE_SIZE_V(1U)
+
+#define ERR_INVALID_CIDX_INC_S    21
+#define ERR_INVALID_CIDX_INC_V(x) ((x) << ERR_INVALID_CIDX_INC_S)
+#define ERR_INVALID_CIDX_INC_F    ERR_INVALID_CIDX_INC_V(1U)
+
+#define ERR_CPL_OPCODE_0_S    19
+#define ERR_CPL_OPCODE_0_V(x) ((x) << ERR_CPL_OPCODE_0_S)
+#define ERR_CPL_OPCODE_0_F    ERR_CPL_OPCODE_0_V(1U)
+
+#define ERR_DROPPED_DB_S    18
+#define ERR_DROPPED_DB_V(x) ((x) << ERR_DROPPED_DB_S)
+#define ERR_DROPPED_DB_F    ERR_DROPPED_DB_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID1_S    17
+#define ERR_DATA_CPL_ON_HIGH_QID1_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID1_S)
+#define ERR_DATA_CPL_ON_HIGH_QID1_F    ERR_DATA_CPL_ON_HIGH_QID1_V(1U)
+
+#define ERR_DATA_CPL_ON_HIGH_QID0_S    16
+#define ERR_DATA_CPL_ON_HIGH_QID0_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID0_S)
+#define ERR_DATA_CPL_ON_HIGH_QID0_F    ERR_DATA_CPL_ON_HIGH_QID0_V(1U)
+
+#define ERR_BAD_DB_PIDX3_S    15
+#define ERR_BAD_DB_PIDX3_V(x) ((x) << ERR_BAD_DB_PIDX3_S)
+#define ERR_BAD_DB_PIDX3_F    ERR_BAD_DB_PIDX3_V(1U)
+
+#define ERR_BAD_DB_PIDX2_S    14
+#define ERR_BAD_DB_PIDX2_V(x) ((x) << ERR_BAD_DB_PIDX2_S)
+#define ERR_BAD_DB_PIDX2_F    ERR_BAD_DB_PIDX2_V(1U)
+
+#define ERR_BAD_DB_PIDX1_S    13
+#define ERR_BAD_DB_PIDX1_V(x) ((x) << ERR_BAD_DB_PIDX1_S)
+#define ERR_BAD_DB_PIDX1_F    ERR_BAD_DB_PIDX1_V(1U)
+
+#define ERR_BAD_DB_PIDX0_S    12
+#define ERR_BAD_DB_PIDX0_V(x) ((x) << ERR_BAD_DB_PIDX0_S)
+#define ERR_BAD_DB_PIDX0_F    ERR_BAD_DB_PIDX0_V(1U)
+
+#define ERR_ING_CTXT_PRIO_S    10
+#define ERR_ING_CTXT_PRIO_V(x) ((x) << ERR_ING_CTXT_PRIO_S)
+#define ERR_ING_CTXT_PRIO_F    ERR_ING_CTXT_PRIO_V(1U)
+
+#define ERR_EGR_CTXT_PRIO_S    9
+#define ERR_EGR_CTXT_PRIO_V(x) ((x) << ERR_EGR_CTXT_PRIO_S)
+#define ERR_EGR_CTXT_PRIO_F    ERR_EGR_CTXT_PRIO_V(1U)
+
+#define DBFIFO_HP_INT_S    8
+#define DBFIFO_HP_INT_V(x) ((x) << DBFIFO_HP_INT_S)
+#define DBFIFO_HP_INT_F    DBFIFO_HP_INT_V(1U)
+
+#define DBFIFO_LP_INT_S    7
+#define DBFIFO_LP_INT_V(x) ((x) << DBFIFO_LP_INT_S)
+#define DBFIFO_LP_INT_F    DBFIFO_LP_INT_V(1U)
+
+#define INGRESS_SIZE_ERR_S    5
+#define INGRESS_SIZE_ERR_V(x) ((x) << INGRESS_SIZE_ERR_S)
+#define INGRESS_SIZE_ERR_F    INGRESS_SIZE_ERR_V(1U)
+
+#define EGRESS_SIZE_ERR_S    4
+#define EGRESS_SIZE_ERR_V(x) ((x) << EGRESS_SIZE_ERR_S)
+#define EGRESS_SIZE_ERR_F    EGRESS_SIZE_ERR_V(1U)
+
+#define SGE_INT_ENABLE3_A 0x1040
+#define SGE_FL_BUFFER_SIZE0_A 0x1044
+#define SGE_FL_BUFFER_SIZE1_A 0x1048
+#define SGE_FL_BUFFER_SIZE2_A 0x104c
+#define SGE_FL_BUFFER_SIZE3_A 0x1050
+#define SGE_FL_BUFFER_SIZE4_A 0x1054
+#define SGE_FL_BUFFER_SIZE5_A 0x1058
+#define SGE_FL_BUFFER_SIZE6_A 0x105c
+#define SGE_FL_BUFFER_SIZE7_A 0x1060
+#define SGE_FL_BUFFER_SIZE8_A 0x1064
+
+#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
+
+#define THRESHOLD_0_S    24
+#define THRESHOLD_0_M    0x3fU
+#define THRESHOLD_0_V(x) ((x) << THRESHOLD_0_S)
+#define THRESHOLD_0_G(x) (((x) >> THRESHOLD_0_S) & THRESHOLD_0_M)
+
+#define THRESHOLD_1_S    16
+#define THRESHOLD_1_M    0x3fU
+#define THRESHOLD_1_V(x) ((x) << THRESHOLD_1_S)
+#define THRESHOLD_1_G(x) (((x) >> THRESHOLD_1_S) & THRESHOLD_1_M)
+
+#define THRESHOLD_2_S    8
+#define THRESHOLD_2_M    0x3fU
+#define THRESHOLD_2_V(x) ((x) << THRESHOLD_2_S)
+#define THRESHOLD_2_G(x) (((x) >> THRESHOLD_2_S) & THRESHOLD_2_M)
+
+#define THRESHOLD_3_S    0
+#define THRESHOLD_3_M    0x3fU
+#define THRESHOLD_3_V(x) ((x) << THRESHOLD_3_S)
+#define THRESHOLD_3_G(x) (((x) >> THRESHOLD_3_S) & THRESHOLD_3_M)
+
+#define SGE_CONM_CTRL_A 0x1094
+
+#define EGRTHRESHOLD_S    8
+#define EGRTHRESHOLD_M    0x3fU
+#define EGRTHRESHOLD_V(x) ((x) << EGRTHRESHOLD_S)
+#define EGRTHRESHOLD_G(x) (((x) >> EGRTHRESHOLD_S) & EGRTHRESHOLD_M)
+
+#define EGRTHRESHOLDPACKING_S    14
+#define EGRTHRESHOLDPACKING_M    0x3fU
+#define EGRTHRESHOLDPACKING_V(x) ((x) << EGRTHRESHOLDPACKING_S)
+#define EGRTHRESHOLDPACKING_G(x) \
+       (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
+
+#define SGE_TIMESTAMP_LO_A 0x1098
+#define SGE_TIMESTAMP_HI_A 0x109c
+
+#define TSOP_S    28
+#define TSOP_M    0x3U
+#define TSOP_V(x) ((x) << TSOP_S)
+#define TSOP_G(x) (((x) >> TSOP_S) & TSOP_M)
+
+#define TSVAL_S    0
+#define TSVAL_M    0xfffffffU
+#define TSVAL_V(x) ((x) << TSVAL_S)
+#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
+
+#define SGE_DBFIFO_STATUS_A 0x10a4
+
+#define HP_INT_THRESH_S    28
+#define HP_INT_THRESH_M    0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
+
+#define LP_INT_THRESH_S    12
+#define LP_INT_THRESH_M    0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define NOCOALESCE_S    26
+#define NOCOALESCE_V(x) ((x) << NOCOALESCE_S)
+#define NOCOALESCE_F    NOCOALESCE_V(1U)
+
+#define ENABLE_DROP_S    13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F    ENABLE_DROP_V(1U)
+
+#define SGE_TIMER_VALUE_0_AND_1_A 0x10b8
+
+#define TIMERVALUE0_S    16
+#define TIMERVALUE0_M    0xffffU
+#define TIMERVALUE0_V(x) ((x) << TIMERVALUE0_S)
+#define TIMERVALUE0_G(x) (((x) >> TIMERVALUE0_S) & TIMERVALUE0_M)
+
+#define TIMERVALUE1_S    0
+#define TIMERVALUE1_M    0xffffU
+#define TIMERVALUE1_V(x) ((x) << TIMERVALUE1_S)
+#define TIMERVALUE1_G(x) (((x) >> TIMERVALUE1_S) & TIMERVALUE1_M)
+
+#define SGE_TIMER_VALUE_2_AND_3_A 0x10bc
+
+#define TIMERVALUE2_S    16
+#define TIMERVALUE2_M    0xffffU
+#define TIMERVALUE2_V(x) ((x) << TIMERVALUE2_S)
+#define TIMERVALUE2_G(x) (((x) >> TIMERVALUE2_S) & TIMERVALUE2_M)
+
+#define TIMERVALUE3_S    0
+#define TIMERVALUE3_M    0xffffU
+#define TIMERVALUE3_V(x) ((x) << TIMERVALUE3_S)
+#define TIMERVALUE3_G(x) (((x) >> TIMERVALUE3_S) & TIMERVALUE3_M)
+
+#define SGE_TIMER_VALUE_4_AND_5_A 0x10c0
+
+#define TIMERVALUE4_S    16
+#define TIMERVALUE4_M    0xffffU
+#define TIMERVALUE4_V(x) ((x) << TIMERVALUE4_S)
+#define TIMERVALUE4_G(x) (((x) >> TIMERVALUE4_S) & TIMERVALUE4_M)
 
-#define  HOSTPAGESIZEPF4_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF4_SHIFT  16
-#define  HOSTPAGESIZEPF4(x)     ((x) << HOSTPAGESIZEPF4_SHIFT)
+#define TIMERVALUE5_S    0
+#define TIMERVALUE5_M    0xffffU
+#define TIMERVALUE5_V(x) ((x) << TIMERVALUE5_S)
+#define TIMERVALUE5_G(x) (((x) >> TIMERVALUE5_S) & TIMERVALUE5_M)
 
-#define  HOSTPAGESIZEPF3_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF3_SHIFT  12
-#define  HOSTPAGESIZEPF3(x)     ((x) << HOSTPAGESIZEPF3_SHIFT)
+#define SGE_DEBUG_INDEX_A 0x10cc
+#define SGE_DEBUG_DATA_HIGH_A 0x10d0
+#define SGE_DEBUG_DATA_LOW_A 0x10d4
 
-#define  HOSTPAGESIZEPF2_MASK   0x0000000fU
-#define  HOSTPAGESIZEPF2_SHIFT  8
-#define  HOSTPAGESIZEPF2(x)     ((x) << HOSTPAGESIZEPF2_SHIFT)
+#define SGE_DEBUG_DATA_LOW_INDEX_2_A   0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3_A   0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10_A 0x12a8
 
-#define  HOSTPAGESIZEPF1_M     0x0000000fU
-#define  HOSTPAGESIZEPF1_S     4
-#define  HOSTPAGESIZEPF1(x)     ((x) << HOSTPAGESIZEPF1_S)
+#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
+#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
 
-#define  HOSTPAGESIZEPF0_M     0x0000000fU
-#define  HOSTPAGESIZEPF0_S     0
-#define  HOSTPAGESIZEPF0(x)     ((x) << HOSTPAGESIZEPF0_S)
+#define HP_INT_THRESH_S    28
+#define HP_INT_THRESH_M    0xfU
+#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
 
-#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
-#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
+#define HP_COUNT_S    16
+#define HP_COUNT_M    0x7ffU
+#define HP_COUNT_G(x) (((x) >> HP_COUNT_S) & HP_COUNT_M)
 
-#define QUEUESPERPAGEPF1_S    4
+#define LP_INT_THRESH_S    12
+#define LP_INT_THRESH_M    0xfU
+#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
 
-#define QUEUESPERPAGEPF0_S    0
-#define QUEUESPERPAGEPF0_MASK   0x0000000fU
-#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+#define LP_COUNT_S    0
+#define LP_COUNT_M    0x7ffU
+#define LP_COUNT_G(x) (((x) >> LP_COUNT_S) & LP_COUNT_M)
 
-#define QUEUESPERPAGEPF0    0
-#define QUEUESPERPAGEPF1    4
+#define LP_INT_THRESH_T5_S    18
+#define LP_INT_THRESH_T5_M    0xfffU
+#define LP_INT_THRESH_T5_V(x) ((x) << LP_INT_THRESH_T5_S)
 
-/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
- * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
- * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
- * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64.  For Ingress Queues,
- * we have a Going To Sleep register at offsets 8x+4.
- *
- * As noted above, we have many instances of the Simple Doorbell and Going To
- * Sleep registers at offsets 8x and 8x+4, respectively.  We want to use a
- * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
- * avoid buffering of the writes to the Simple Doorbell and we want to use a
- * non-contiguous offset for the Going To Sleep writes in order to avoid
- * possible combining between them.
- */
-#define SGE_UDB_SIZE            128
-#define SGE_UDB_KDOORBELL       8
-#define SGE_UDB_GTS             20
-#define SGE_UDB_WCDOORBELL      64
-
-#define SGE_INT_CAUSE1 0x1024
-#define SGE_INT_CAUSE2 0x1030
-#define SGE_INT_CAUSE3 0x103c
-#define  ERR_FLM_DBP               0x80000000U
-#define  ERR_FLM_IDMA1             0x40000000U
-#define  ERR_FLM_IDMA0             0x20000000U
-#define  ERR_FLM_HINT              0x10000000U
-#define  ERR_PCIE_ERROR3           0x08000000U
-#define  ERR_PCIE_ERROR2           0x04000000U
-#define  ERR_PCIE_ERROR1           0x02000000U
-#define  ERR_PCIE_ERROR0           0x01000000U
-#define  ERR_TIMER_ABOVE_MAX_QID   0x00800000U
-#define  ERR_CPL_EXCEED_IQE_SIZE   0x00400000U
-#define  ERR_INVALID_CIDX_INC      0x00200000U
-#define  ERR_ITP_TIME_PAUSED       0x00100000U
-#define  ERR_CPL_OPCODE_0          0x00080000U
-#define  ERR_DROPPED_DB            0x00040000U
-#define  ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
-#define  ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
-#define  ERR_BAD_DB_PIDX3          0x00008000U
-#define  ERR_BAD_DB_PIDX2          0x00004000U
-#define  ERR_BAD_DB_PIDX1          0x00002000U
-#define  ERR_BAD_DB_PIDX0          0x00001000U
-#define  ERR_ING_PCIE_CHAN         0x00000800U
-#define  ERR_ING_CTXT_PRIO         0x00000400U
-#define  ERR_EGR_CTXT_PRIO         0x00000200U
-#define  DBFIFO_HP_INT             0x00000100U
-#define  DBFIFO_LP_INT             0x00000080U
-#define  REG_ADDRESS_ERR           0x00000040U
-#define  INGRESS_SIZE_ERR          0x00000020U
-#define  EGRESS_SIZE_ERR           0x00000010U
-#define  ERR_INV_CTXT3             0x00000008U
-#define  ERR_INV_CTXT2             0x00000004U
-#define  ERR_INV_CTXT1             0x00000002U
-#define  ERR_INV_CTXT0             0x00000001U
-
-#define SGE_INT_ENABLE3 0x1040
-#define SGE_FL_BUFFER_SIZE0 0x1044
-#define SGE_FL_BUFFER_SIZE1 0x1048
-#define SGE_FL_BUFFER_SIZE2 0x104c
-#define SGE_FL_BUFFER_SIZE3 0x1050
-#define SGE_FL_BUFFER_SIZE4 0x1054
-#define SGE_FL_BUFFER_SIZE5 0x1058
-#define SGE_FL_BUFFER_SIZE6 0x105c
-#define SGE_FL_BUFFER_SIZE7 0x1060
-#define SGE_FL_BUFFER_SIZE8 0x1064
-
-#define SGE_INGRESS_RX_THRESHOLD 0x10a0
-#define  THRESHOLD_0_MASK   0x3f000000U
-#define  THRESHOLD_0_SHIFT  24
-#define  THRESHOLD_0(x)     ((x) << THRESHOLD_0_SHIFT)
-#define  THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
-#define  THRESHOLD_1_MASK   0x003f0000U
-#define  THRESHOLD_1_SHIFT  16
-#define  THRESHOLD_1(x)     ((x) << THRESHOLD_1_SHIFT)
-#define  THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
-#define  THRESHOLD_2_MASK   0x00003f00U
-#define  THRESHOLD_2_SHIFT  8
-#define  THRESHOLD_2(x)     ((x) << THRESHOLD_2_SHIFT)
-#define  THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
-#define  THRESHOLD_3_MASK   0x0000003fU
-#define  THRESHOLD_3_SHIFT  0
-#define  THRESHOLD_3(x)     ((x) << THRESHOLD_3_SHIFT)
-#define  THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
-
-#define SGE_CONM_CTRL 0x1094
-#define  EGRTHRESHOLD_MASK   0x00003f00U
-#define  EGRTHRESHOLDshift   8
-#define  EGRTHRESHOLD(x)     ((x) << EGRTHRESHOLDshift)
-#define  EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
-
-#define EGRTHRESHOLDPACKING_MASK       0x3fU
-#define EGRTHRESHOLDPACKING_SHIFT      14
-#define EGRTHRESHOLDPACKING(x)         ((x) << EGRTHRESHOLDPACKING_SHIFT)
-#define EGRTHRESHOLDPACKING_GET(x)     (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
-                                         EGRTHRESHOLDPACKING_MASK)
-
-#define SGE_DBFIFO_STATUS 0x10a4
-#define  HP_INT_THRESH_SHIFT 28
-#define  HP_INT_THRESH_MASK  0xfU
-#define  HP_INT_THRESH(x)    ((x) << HP_INT_THRESH_SHIFT)
-#define  LP_INT_THRESH_SHIFT 12
-#define  LP_INT_THRESH_MASK  0xfU
-#define  LP_INT_THRESH(x)    ((x) << LP_INT_THRESH_SHIFT)
-
-#define SGE_DOORBELL_CONTROL 0x10a8
-#define  ENABLE_DROP        (1 << 13)
-
-#define S_NOCOALESCE    26
-#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
-#define F_NOCOALESCE    V_NOCOALESCE(1U)
-
-#define SGE_TIMESTAMP_LO 0x1098
-#define SGE_TIMESTAMP_HI 0x109c
-#define S_TSVAL    0
-#define M_TSVAL    0xfffffffU
-#define GET_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL)
-
-#define SGE_TIMER_VALUE_0_AND_1 0x10b8
-#define  TIMERVALUE0_MASK   0xffff0000U
-#define  TIMERVALUE0_SHIFT  16
-#define  TIMERVALUE0(x)     ((x) << TIMERVALUE0_SHIFT)
-#define  TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
-#define  TIMERVALUE1_MASK   0x0000ffffU
-#define  TIMERVALUE1_SHIFT  0
-#define  TIMERVALUE1(x)     ((x) << TIMERVALUE1_SHIFT)
-#define  TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
-
-#define SGE_TIMER_VALUE_2_AND_3 0x10bc
-#define  TIMERVALUE2_MASK   0xffff0000U
-#define  TIMERVALUE2_SHIFT  16
-#define  TIMERVALUE2(x)     ((x) << TIMERVALUE2_SHIFT)
-#define  TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
-#define  TIMERVALUE3_MASK   0x0000ffffU
-#define  TIMERVALUE3_SHIFT  0
-#define  TIMERVALUE3(x)     ((x) << TIMERVALUE3_SHIFT)
-#define  TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
-
-#define SGE_TIMER_VALUE_4_AND_5 0x10c0
-#define  TIMERVALUE4_MASK   0xffff0000U
-#define  TIMERVALUE4_SHIFT  16
-#define  TIMERVALUE4(x)     ((x) << TIMERVALUE4_SHIFT)
-#define  TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
-#define  TIMERVALUE5_MASK   0x0000ffffU
-#define  TIMERVALUE5_SHIFT  0
-#define  TIMERVALUE5(x)     ((x) << TIMERVALUE5_SHIFT)
-#define  TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
-
-#define SGE_DEBUG_INDEX 0x10cc
-#define SGE_DEBUG_DATA_HIGH 0x10d0
-#define SGE_DEBUG_DATA_LOW 0x10d4
-#define SGE_DEBUG_DATA_LOW_INDEX_2     0x12c8
-#define SGE_DEBUG_DATA_LOW_INDEX_3     0x12cc
-#define SGE_DEBUG_DATA_HIGH_INDEX_10   0x12a8
-#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
-#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
+#define LP_COUNT_T5_S    0
+#define LP_COUNT_T5_M    0x3ffffU
+#define LP_COUNT_T5_G(x) (((x) >> LP_COUNT_T5_S) & LP_COUNT_T5_M)
+
+#define SGE_DOORBELL_CONTROL_A 0x10a8
+
+#define SGE_STAT_TOTAL_A       0x10e4
+#define SGE_STAT_MATCH_A       0x10e8
+#define SGE_STAT_CFG_A         0x10ec
+
+#define STATSOURCE_T5_S    9
+#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+
+#define SGE_DBFIFO_STATUS2_A 0x1118
+
+#define HP_INT_THRESH_T5_S    10
+#define HP_INT_THRESH_T5_M    0xfU
+#define HP_INT_THRESH_T5_V(x) ((x) << HP_INT_THRESH_T5_S)
+
+#define HP_COUNT_T5_S    0
+#define HP_COUNT_T5_M    0x3ffU
+#define HP_COUNT_T5_G(x) (((x) >> HP_COUNT_T5_S) & HP_COUNT_T5_M)
+
+#define ENABLE_DROP_S    13
+#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
+#define ENABLE_DROP_F    ENABLE_DROP_V(1U)
+
+#define DROPPED_DB_S    0
+#define DROPPED_DB_V(x) ((x) << DROPPED_DB_S)
+#define DROPPED_DB_F    DROPPED_DB_V(1U)
+
+#define SGE_CTXT_CMD_A 0x11fc
+#define SGE_DBQ_CTXT_BADDR_A 0x1084
+
+/* registers for module PCIE */
+#define PCIE_PF_CFG_A  0x40
+
+#define AIVEC_S    4
+#define AIVEC_M    0x3ffU
+#define AIVEC_V(x) ((x) << AIVEC_S)
+
+#define PCIE_PF_CLI_A  0x44
+#define PCIE_INT_CAUSE_A       0x3004
+
+#define UNXSPLCPLERR_S    29
+#define UNXSPLCPLERR_V(x) ((x) << UNXSPLCPLERR_S)
+#define UNXSPLCPLERR_F    UNXSPLCPLERR_V(1U)
+
+#define PCIEPINT_S    28
+#define PCIEPINT_V(x) ((x) << PCIEPINT_S)
+#define PCIEPINT_F    PCIEPINT_V(1U)
+
+#define PCIESINT_S    27
+#define PCIESINT_V(x) ((x) << PCIESINT_S)
+#define PCIESINT_F    PCIESINT_V(1U)
+
+#define RPLPERR_S    26
+#define RPLPERR_V(x) ((x) << RPLPERR_S)
+#define RPLPERR_F    RPLPERR_V(1U)
+
+#define RXWRPERR_S    25
+#define RXWRPERR_V(x) ((x) << RXWRPERR_S)
+#define RXWRPERR_F    RXWRPERR_V(1U)
+
+#define RXCPLPERR_S    24
+#define RXCPLPERR_V(x) ((x) << RXCPLPERR_S)
+#define RXCPLPERR_F    RXCPLPERR_V(1U)
+
+#define PIOTAGPERR_S    23
+#define PIOTAGPERR_V(x) ((x) << PIOTAGPERR_S)
+#define PIOTAGPERR_F    PIOTAGPERR_V(1U)
+
+#define MATAGPERR_S    22
+#define MATAGPERR_V(x) ((x) << MATAGPERR_S)
+#define MATAGPERR_F    MATAGPERR_V(1U)
+
+#define INTXCLRPERR_S    21
+#define INTXCLRPERR_V(x) ((x) << INTXCLRPERR_S)
+#define INTXCLRPERR_F    INTXCLRPERR_V(1U)
+
+#define FIDPERR_S    20
+#define FIDPERR_V(x) ((x) << FIDPERR_S)
+#define FIDPERR_F    FIDPERR_V(1U)
+
+#define CFGSNPPERR_S    19
+#define CFGSNPPERR_V(x) ((x) << CFGSNPPERR_S)
+#define CFGSNPPERR_F    CFGSNPPERR_V(1U)
+
+#define HRSPPERR_S    18
+#define HRSPPERR_V(x) ((x) << HRSPPERR_S)
+#define HRSPPERR_F    HRSPPERR_V(1U)
+
+#define HREQPERR_S    17
+#define HREQPERR_V(x) ((x) << HREQPERR_S)
+#define HREQPERR_F    HREQPERR_V(1U)
+
+#define HCNTPERR_S    16
+#define HCNTPERR_V(x) ((x) << HCNTPERR_S)
+#define HCNTPERR_F    HCNTPERR_V(1U)
+
+#define DRSPPERR_S    15
+#define DRSPPERR_V(x) ((x) << DRSPPERR_S)
+#define DRSPPERR_F    DRSPPERR_V(1U)
+
+#define DREQPERR_S    14
+#define DREQPERR_V(x) ((x) << DREQPERR_S)
+#define DREQPERR_F    DREQPERR_V(1U)
+
+#define DCNTPERR_S    13
+#define DCNTPERR_V(x) ((x) << DCNTPERR_S)
+#define DCNTPERR_F    DCNTPERR_V(1U)
+
+#define CRSPPERR_S    12
+#define CRSPPERR_V(x) ((x) << CRSPPERR_S)
+#define CRSPPERR_F    CRSPPERR_V(1U)
+
+#define CREQPERR_S    11
+#define CREQPERR_V(x) ((x) << CREQPERR_S)
+#define CREQPERR_F    CREQPERR_V(1U)
+
+#define CCNTPERR_S    10
+#define CCNTPERR_V(x) ((x) << CCNTPERR_S)
+#define CCNTPERR_F    CCNTPERR_V(1U)
+
+#define TARTAGPERR_S    9
+#define TARTAGPERR_V(x) ((x) << TARTAGPERR_S)
+#define TARTAGPERR_F    TARTAGPERR_V(1U)
+
+#define PIOREQPERR_S    8
+#define PIOREQPERR_V(x) ((x) << PIOREQPERR_S)
+#define PIOREQPERR_F    PIOREQPERR_V(1U)
+
+#define PIOCPLPERR_S    7
+#define PIOCPLPERR_V(x) ((x) << PIOCPLPERR_S)
+#define PIOCPLPERR_F    PIOCPLPERR_V(1U)
+
+#define MSIXDIPERR_S    6
+#define MSIXDIPERR_V(x) ((x) << MSIXDIPERR_S)
+#define MSIXDIPERR_F    MSIXDIPERR_V(1U)
+
+#define MSIXDATAPERR_S    5
+#define MSIXDATAPERR_V(x) ((x) << MSIXDATAPERR_S)
+#define MSIXDATAPERR_F    MSIXDATAPERR_V(1U)
+
+#define MSIXADDRHPERR_S    4
+#define MSIXADDRHPERR_V(x) ((x) << MSIXADDRHPERR_S)
+#define MSIXADDRHPERR_F    MSIXADDRHPERR_V(1U)
+
+#define MSIXADDRLPERR_S    3
+#define MSIXADDRLPERR_V(x) ((x) << MSIXADDRLPERR_S)
+#define MSIXADDRLPERR_F    MSIXADDRLPERR_V(1U)
+
+#define MSIDATAPERR_S    2
+#define MSIDATAPERR_V(x) ((x) << MSIDATAPERR_S)
+#define MSIDATAPERR_F    MSIDATAPERR_V(1U)
+
+#define MSIADDRHPERR_S    1
+#define MSIADDRHPERR_V(x) ((x) << MSIADDRHPERR_S)
+#define MSIADDRHPERR_F    MSIADDRHPERR_V(1U)
+
+#define MSIADDRLPERR_S    0
+#define MSIADDRLPERR_V(x) ((x) << MSIADDRLPERR_S)
+#define MSIADDRLPERR_F    MSIADDRLPERR_V(1U)
+
+#define READRSPERR_S    29
+#define READRSPERR_V(x) ((x) << READRSPERR_S)
+#define READRSPERR_F    READRSPERR_V(1U)
+
+#define TRGT1GRPPERR_S    28
+#define TRGT1GRPPERR_V(x) ((x) << TRGT1GRPPERR_S)
+#define TRGT1GRPPERR_F    TRGT1GRPPERR_V(1U)
+
+#define IPSOTPERR_S    27
+#define IPSOTPERR_V(x) ((x) << IPSOTPERR_S)
+#define IPSOTPERR_F    IPSOTPERR_V(1U)
+
+#define IPRETRYPERR_S    26
+#define IPRETRYPERR_V(x) ((x) << IPRETRYPERR_S)
+#define IPRETRYPERR_F    IPRETRYPERR_V(1U)
+
+#define IPRXDATAGRPPERR_S    25
+#define IPRXDATAGRPPERR_V(x) ((x) << IPRXDATAGRPPERR_S)
+#define IPRXDATAGRPPERR_F    IPRXDATAGRPPERR_V(1U)
+
+#define IPRXHDRGRPPERR_S    24
+#define IPRXHDRGRPPERR_V(x) ((x) << IPRXHDRGRPPERR_S)
+#define IPRXHDRGRPPERR_F    IPRXHDRGRPPERR_V(1U)
+
+#define MAGRPPERR_S    22
+#define MAGRPPERR_V(x) ((x) << MAGRPPERR_S)
+#define MAGRPPERR_F    MAGRPPERR_V(1U)
+
+#define VFIDPERR_S    21
+#define VFIDPERR_V(x) ((x) << VFIDPERR_S)
+#define VFIDPERR_F    VFIDPERR_V(1U)
+
+#define HREQWRPERR_S    16
+#define HREQWRPERR_V(x) ((x) << HREQWRPERR_S)
+#define HREQWRPERR_F    HREQWRPERR_V(1U)
+
+#define DREQWRPERR_S    13
+#define DREQWRPERR_V(x) ((x) << DREQWRPERR_S)
+#define DREQWRPERR_F    DREQWRPERR_V(1U)
+
+#define CREQRDPERR_S    11
+#define CREQRDPERR_V(x) ((x) << CREQRDPERR_S)
+#define CREQRDPERR_F    CREQRDPERR_V(1U)
+
+#define MSTTAGQPERR_S    10
+#define MSTTAGQPERR_V(x) ((x) << MSTTAGQPERR_S)
+#define MSTTAGQPERR_F    MSTTAGQPERR_V(1U)
+
+#define PIOREQGRPPERR_S    8
+#define PIOREQGRPPERR_V(x) ((x) << PIOREQGRPPERR_S)
+#define PIOREQGRPPERR_F    PIOREQGRPPERR_V(1U)
+
+#define PIOCPLGRPPERR_S    7
+#define PIOCPLGRPPERR_V(x) ((x) << PIOCPLGRPPERR_S)
+#define PIOCPLGRPPERR_F    PIOCPLGRPPERR_V(1U)
+
+#define MSIXSTIPERR_S    2
+#define MSIXSTIPERR_V(x) ((x) << MSIXSTIPERR_S)
+#define MSIXSTIPERR_F    MSIXSTIPERR_V(1U)
+
+#define MSTTIMEOUTPERR_S    1
+#define MSTTIMEOUTPERR_V(x) ((x) << MSTTIMEOUTPERR_S)
+#define MSTTIMEOUTPERR_F    MSTTIMEOUTPERR_V(1U)
+
+#define MSTGRPPERR_S    0
+#define MSTGRPPERR_V(x) ((x) << MSTGRPPERR_S)
+#define MSTGRPPERR_F    MSTGRPPERR_V(1U)
+
+#define PCIE_NONFAT_ERR_A      0x3010
+#define PCIE_CFG_SPACE_REQ_A   0x3060
+#define PCIE_CFG_SPACE_DATA_A  0x3064
+#define PCIE_MEM_ACCESS_BASE_WIN_A 0x3068
+
+#define PCIEOFST_S    10
+#define PCIEOFST_M    0x3fffffU
+#define PCIEOFST_G(x) (((x) >> PCIEOFST_S) & PCIEOFST_M)
+
+#define BIR_S    8
+#define BIR_M    0x3U
+#define BIR_V(x) ((x) << BIR_S)
+#define BIR_G(x) (((x) >> BIR_S) & BIR_M)
+
+#define WINDOW_S    0
+#define WINDOW_M    0xffU
+#define WINDOW_V(x) ((x) << WINDOW_S)
+#define WINDOW_G(x) (((x) >> WINDOW_S) & WINDOW_M)
+
+#define PCIE_MEM_ACCESS_OFFSET_A 0x306c
+
+#define ENABLE_S    30
+#define ENABLE_V(x) ((x) << ENABLE_S)
+#define ENABLE_F    ENABLE_V(1U)
+
+#define LOCALCFG_S    28
+#define LOCALCFG_V(x) ((x) << LOCALCFG_S)
+#define LOCALCFG_F    LOCALCFG_V(1U)
+
+#define FUNCTION_S    12
+#define FUNCTION_V(x) ((x) << FUNCTION_S)
+
+#define REGISTER_S    0
+#define REGISTER_V(x) ((x) << REGISTER_S)
+
+#define PFNUM_S    0
+#define PFNUM_V(x) ((x) << PFNUM_S)
+
+#define PCIE_FW_A 0x30b8
+
+#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
+
+#define RNPP_S    31
+#define RNPP_V(x) ((x) << RNPP_S)
+#define RNPP_F    RNPP_V(1U)
+
+#define RPCP_S    29
+#define RPCP_V(x) ((x) << RPCP_S)
+#define RPCP_F    RPCP_V(1U)
+
+#define RCIP_S    27
+#define RCIP_V(x) ((x) << RCIP_S)
+#define RCIP_F    RCIP_V(1U)
+
+#define RCCP_S    26
+#define RCCP_V(x) ((x) << RCCP_S)
+#define RCCP_F    RCCP_V(1U)
+
+#define RFTP_S    23
+#define RFTP_V(x) ((x) << RFTP_S)
+#define RFTP_F    RFTP_V(1U)
+
+#define PTRP_S    20
+#define PTRP_V(x) ((x) << PTRP_S)
+#define PTRP_F    PTRP_V(1U)
 
-#define S_HP_INT_THRESH    28
-#define M_HP_INT_THRESH 0xfU
-#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
-#define S_LP_INT_THRESH_T5    18
-#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
-#define M_LP_COUNT_T5    0x3ffffU
-#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
-#define M_HP_COUNT 0x7ffU
-#define S_HP_COUNT 16
-#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
-#define S_LP_INT_THRESH    12
-#define M_LP_INT_THRESH 0xfU
-#define M_LP_INT_THRESH_T5    0xfffU
-#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
-#define M_LP_COUNT 0x7ffU
-#define S_LP_COUNT 0
-#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
-#define A_SGE_DBFIFO_STATUS 0x10a4
-
-#define SGE_STAT_TOTAL 0x10e4
-#define SGE_STAT_MATCH 0x10e8
-
-#define SGE_STAT_CFG   0x10ec
-#define S_STATSOURCE_T5    9
-#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
-
-#define SGE_DBFIFO_STATUS2 0x1118
-#define M_HP_COUNT_T5    0x3ffU
-#define G_HP_COUNT_T5(x) ((x)  & M_HP_COUNT_T5)
-#define S_HP_INT_THRESH_T5    10
-#define M_HP_INT_THRESH_T5    0xfU
-#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
-
-#define S_ENABLE_DROP    13
-#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
-#define F_ENABLE_DROP    V_ENABLE_DROP(1U)
-#define S_DROPPED_DB 0
-#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
-#define F_DROPPED_DB V_DROPPED_DB(1U)
-#define A_SGE_DOORBELL_CONTROL 0x10a8
-
-#define A_SGE_CTXT_CMD 0x11fc
-#define A_SGE_DBQ_CTXT_BADDR 0x1084
-
-#define PCIE_PF_CFG 0x40
-#define  AIVEC(x)      ((x) << 4)
-#define  AIVEC_MASK    0x3ffU
-
-#define PCIE_PF_CLI 0x44
-#define PCIE_INT_CAUSE 0x3004
-#define  UNXSPLCPLERR  0x20000000U
-#define  PCIEPINT      0x10000000U
-#define  PCIESINT      0x08000000U
-#define  RPLPERR       0x04000000U
-#define  RXWRPERR      0x02000000U
-#define  RXCPLPERR     0x01000000U
-#define  PIOTAGPERR    0x00800000U
-#define  MATAGPERR     0x00400000U
-#define  INTXCLRPERR   0x00200000U
-#define  FIDPERR       0x00100000U
-#define  CFGSNPPERR    0x00080000U
-#define  HRSPPERR      0x00040000U
-#define  HREQPERR      0x00020000U
-#define  HCNTPERR      0x00010000U
-#define  DRSPPERR      0x00008000U
-#define  DREQPERR      0x00004000U
-#define  DCNTPERR      0x00002000U
-#define  CRSPPERR      0x00001000U
-#define  CREQPERR      0x00000800U
-#define  CCNTPERR      0x00000400U
-#define  TARTAGPERR    0x00000200U
-#define  PIOREQPERR    0x00000100U
-#define  PIOCPLPERR    0x00000080U
-#define  MSIXDIPERR    0x00000040U
-#define  MSIXDATAPERR  0x00000020U
-#define  MSIXADDRHPERR 0x00000010U
-#define  MSIXADDRLPERR 0x00000008U
-#define  MSIDATAPERR   0x00000004U
-#define  MSIADDRHPERR  0x00000002U
-#define  MSIADDRLPERR  0x00000001U
-
-#define  READRSPERR      0x20000000U
-#define  TRGT1GRPPERR    0x10000000U
-#define  IPSOTPERR       0x08000000U
-#define  IPRXDATAGRPPERR 0x02000000U
-#define  IPRXHDRGRPPERR  0x01000000U
-#define  MAGRPPERR       0x00400000U
-#define  VFIDPERR        0x00200000U
-#define  HREQWRPERR      0x00010000U
-#define  DREQWRPERR      0x00002000U
-#define  MSTTAGQPERR     0x00000400U
-#define  PIOREQGRPPERR   0x00000100U
-#define  PIOCPLGRPPERR   0x00000080U
-#define  MSIXSTIPERR     0x00000004U
-#define  MSTTIMEOUTPERR  0x00000002U
-#define  MSTGRPPERR      0x00000001U
-
-#define PCIE_NONFAT_ERR 0x3010
-#define PCIE_CFG_SPACE_REQ 0x3060
-#define PCIE_CFG_SPACE_DATA 0x3064
-#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
-#define S_PCIEOFST       10
-#define M_PCIEOFST       0x3fffffU
-#define GET_PCIEOFST(x)  (((x) >> S_PCIEOFST) & M_PCIEOFST)
-#define  PCIEOFST_MASK   0xfffffc00U
-#define  BIR_MASK        0x00000300U
-#define  BIR_SHIFT       8
-#define  BIR(x)          ((x) << BIR_SHIFT)
-#define  WINDOW_MASK     0x000000ffU
-#define  WINDOW_SHIFT    0
-#define  WINDOW(x)       ((x) << WINDOW_SHIFT)
-#define  GET_WINDOW(x)  (((x) >> WINDOW_SHIFT) & WINDOW_MASK)
-#define PCIE_MEM_ACCESS_OFFSET 0x306c
-#define ENABLE (1U << 30)
-#define FUNCTION(x) ((x) << 12)
-#define F_LOCALCFG    (1U << 28)
-
-#define S_PFNUM    0
-#define V_PFNUM(x) ((x) << S_PFNUM)
-
-#define PCIE_FW 0x30b8
-#define  PCIE_FW_ERR           0x80000000U
-#define  PCIE_FW_INIT          0x40000000U
-#define  PCIE_FW_HALT          0x20000000U
-#define  PCIE_FW_MASTER_VLD    0x00008000U
-#define  PCIE_FW_MASTER(x)     ((x) << 12)
-#define  PCIE_FW_MASTER_MASK   0x7
-#define  PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
-
-#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
-#define  RNPP 0x80000000U
-#define  RPCP 0x20000000U
-#define  RCIP 0x08000000U
-#define  RCCP 0x04000000U
-#define  RFTP 0x00800000U
-#define  PTRP 0x00100000U
-
-#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
-#define  TPCP 0x40000000U
-#define  TNPP 0x20000000U
-#define  TFTP 0x10000000U
-#define  TCAP 0x08000000U
-#define  TCIP 0x04000000U
-#define  RCAP 0x02000000U
-#define  PLUP 0x00800000U
-#define  PLDN 0x00400000U
-#define  OTDD 0x00200000U
-#define  GTRP 0x00100000U
-#define  RDPE 0x00040000U
-#define  TDCE 0x00020000U
-#define  TDUE 0x00010000U
-
-#define MC_INT_CAUSE 0x7518
-#define MC_P_INT_CAUSE 0x41318
-#define  ECC_UE_INT_CAUSE 0x00000004U
-#define  ECC_CE_INT_CAUSE 0x00000002U
-#define  PERR_INT_CAUSE   0x00000001U
-
-#define MC_ECC_STATUS 0x751c
-#define MC_P_ECC_STATUS 0x4131c
-#define  ECC_CECNT_MASK   0xffff0000U
-#define  ECC_CECNT_SHIFT  16
-#define  ECC_CECNT(x)     ((x) << ECC_CECNT_SHIFT)
-#define  ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
-#define  ECC_UECNT_MASK   0x0000ffffU
-#define  ECC_UECNT_SHIFT  0
-#define  ECC_UECNT(x)     ((x) << ECC_UECNT_SHIFT)
-#define  ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
-
-#define MC_BIST_CMD 0x7600
-#define  START_BIST          0x80000000U
-#define  BIST_CMD_GAP_MASK   0x0000ff00U
-#define  BIST_CMD_GAP_SHIFT  8
-#define  BIST_CMD_GAP(x)     ((x) << BIST_CMD_GAP_SHIFT)
-#define  BIST_OPCODE_MASK    0x00000003U
-#define  BIST_OPCODE_SHIFT   0
-#define  BIST_OPCODE(x)      ((x) << BIST_OPCODE_SHIFT)
-
-#define MC_BIST_CMD_ADDR 0x7604
-#define MC_BIST_CMD_LEN 0x7608
-#define MC_BIST_DATA_PATTERN 0x760c
-#define  BIST_DATA_TYPE_MASK   0x0000000fU
-#define  BIST_DATA_TYPE_SHIFT  0
-#define  BIST_DATA_TYPE(x)     ((x) << BIST_DATA_TYPE_SHIFT)
-
-#define MC_BIST_STATUS_RDATA 0x7688
+#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A 0x59a4
 
+#define TPCP_S    30
+#define TPCP_V(x) ((x) << TPCP_S)
+#define TPCP_F    TPCP_V(1U)
+
+#define TNPP_S    29
+#define TNPP_V(x) ((x) << TNPP_S)
+#define TNPP_F    TNPP_V(1U)
+
+#define TFTP_S    28
+#define TFTP_V(x) ((x) << TFTP_S)
+#define TFTP_F    TFTP_V(1U)
+
+#define TCAP_S    27
+#define TCAP_V(x) ((x) << TCAP_S)
+#define TCAP_F    TCAP_V(1U)
+
+#define TCIP_S    26
+#define TCIP_V(x) ((x) << TCIP_S)
+#define TCIP_F    TCIP_V(1U)
+
+#define RCAP_S    25
+#define RCAP_V(x) ((x) << RCAP_S)
+#define RCAP_F    RCAP_V(1U)
+
+#define PLUP_S    23
+#define PLUP_V(x) ((x) << PLUP_S)
+#define PLUP_F    PLUP_V(1U)
+
+#define PLDN_S    22
+#define PLDN_V(x) ((x) << PLDN_S)
+#define PLDN_F    PLDN_V(1U)
+
+#define OTDD_S    21
+#define OTDD_V(x) ((x) << OTDD_S)
+#define OTDD_F    OTDD_V(1U)
+
+#define GTRP_S    20
+#define GTRP_V(x) ((x) << GTRP_S)
+#define GTRP_F    GTRP_V(1U)
+
+#define RDPE_S    18
+#define RDPE_V(x) ((x) << RDPE_S)
+#define RDPE_F    RDPE_V(1U)
+
+#define TDCE_S    17
+#define TDCE_V(x) ((x) << TDCE_S)
+#define TDCE_F    TDCE_V(1U)
+
+#define TDUE_S    16
+#define TDUE_V(x) ((x) << TDUE_S)
+#define TDUE_F    TDUE_V(1U)
+
+/* registers for module MC */
+#define MC_INT_CAUSE_A         0x7518
+#define MC_P_INT_CAUSE_A       0x41318
+
+#define ECC_UE_INT_CAUSE_S    2
+#define ECC_UE_INT_CAUSE_V(x) ((x) << ECC_UE_INT_CAUSE_S)
+#define ECC_UE_INT_CAUSE_F    ECC_UE_INT_CAUSE_V(1U)
+
+#define ECC_CE_INT_CAUSE_S    1
+#define ECC_CE_INT_CAUSE_V(x) ((x) << ECC_CE_INT_CAUSE_S)
+#define ECC_CE_INT_CAUSE_F    ECC_CE_INT_CAUSE_V(1U)
+
+#define PERR_INT_CAUSE_S    0
+#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
+#define PERR_INT_CAUSE_F    PERR_INT_CAUSE_V(1U)
+
+#define MC_ECC_STATUS_A                0x751c
+#define MC_P_ECC_STATUS_A      0x4131c
+
+#define ECC_CECNT_S    16
+#define ECC_CECNT_M    0xffffU
+#define ECC_CECNT_V(x) ((x) << ECC_CECNT_S)
+#define ECC_CECNT_G(x) (((x) >> ECC_CECNT_S) & ECC_CECNT_M)
+
+#define ECC_UECNT_S    0
+#define ECC_UECNT_M    0xffffU
+#define ECC_UECNT_V(x) ((x) << ECC_UECNT_S)
+#define ECC_UECNT_G(x) (((x) >> ECC_UECNT_S) & ECC_UECNT_M)
+
+#define MC_BIST_CMD_A 0x7600
+
+#define START_BIST_S    31
+#define START_BIST_V(x) ((x) << START_BIST_S)
+#define START_BIST_F    START_BIST_V(1U)
+
+#define BIST_CMD_GAP_S    8
+#define BIST_CMD_GAP_V(x) ((x) << BIST_CMD_GAP_S)
+
+#define BIST_OPCODE_S    0
+#define BIST_OPCODE_V(x) ((x) << BIST_OPCODE_S)
+
+#define MC_BIST_CMD_ADDR_A 0x7604
+#define MC_BIST_CMD_LEN_A 0x7608
+#define MC_BIST_DATA_PATTERN_A 0x760c
+
+#define MC_BIST_STATUS_RDATA_A 0x7688
+
+/* registers for module MA */
 #define MA_EDRAM0_BAR_A 0x77c0
 
 #define EDRAM0_SIZE_S    0
 #define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S)
 #define EXT_MEM0_ENABLE_F    EXT_MEM0_ENABLE_V(1U)
 
-#define MA_INT_CAUSE 0x77e0
-#define  MEM_PERR_INT_CAUSE 0x00000002U
-#define  MEM_WRAP_INT_CAUSE 0x00000001U
-
-#define MA_INT_WRAP_STATUS 0x77e4
-#define  MEM_WRAP_ADDRESS_MASK   0xfffffff0U
-#define  MEM_WRAP_ADDRESS_SHIFT  4
-#define  MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
-#define  MEM_WRAP_CLIENT_NUM_MASK   0x0000000fU
-#define  MEM_WRAP_CLIENT_NUM_SHIFT  0
-#define  MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
-#define MA_PCIE_FW 0x30b8
-#define MA_PARITY_ERROR_STATUS 0x77f4
-#define MA_PARITY_ERROR_STATUS2 0x7804
-
-#define EDC_0_BASE_ADDR 0x7900
-
-#define EDC_BIST_CMD 0x7904
-#define EDC_BIST_CMD_ADDR 0x7908
-#define EDC_BIST_CMD_LEN 0x790c
-#define EDC_BIST_DATA_PATTERN 0x7910
-#define EDC_BIST_STATUS_RDATA 0x7928
-#define EDC_INT_CAUSE 0x7978
-#define  ECC_UE_PAR     0x00000020U
-#define  ECC_CE_PAR     0x00000010U
-#define  PERR_PAR_CAUSE 0x00000008U
-
-#define EDC_ECC_STATUS 0x797c
-
-#define EDC_1_BASE_ADDR 0x7980
-
-#define CIM_BOOT_CFG 0x7b00
-#define  BOOTADDR_MASK 0xffffff00U
-#define  UPCRST        0x1U
-
-#define CIM_PF_MAILBOX_DATA 0x240
-#define CIM_PF_MAILBOX_CTRL 0x280
-#define  MBMSGVALID     0x00000008U
-#define  MBINTREQ       0x00000004U
-#define  MBOWNER_MASK   0x00000003U
-#define  MBOWNER_SHIFT  0
-#define  MBOWNER(x)     ((x) << MBOWNER_SHIFT)
-#define  MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
-
-#define CIM_PF_HOST_INT_ENABLE 0x288
-#define  MBMSGRDYINTEN(x) ((x) << 19)
-
-#define CIM_PF_HOST_INT_CAUSE 0x28c
-#define  MBMSGRDYINT 0x00080000U
-
-#define CIM_HOST_INT_CAUSE 0x7b2c
-#define  TIEQOUTPARERRINT  0x00100000U
-#define  TIEQINPARERRINT   0x00080000U
-#define  MBHOSTPARERR      0x00040000U
-#define  MBUPPARERR        0x00020000U
-#define  IBQPARERR         0x0001f800U
-#define  IBQTP0PARERR      0x00010000U
-#define  IBQTP1PARERR      0x00008000U
-#define  IBQULPPARERR      0x00004000U
-#define  IBQSGELOPARERR    0x00002000U
-#define  IBQSGEHIPARERR    0x00001000U
-#define  IBQNCSIPARERR     0x00000800U
-#define  OBQPARERR         0x000007e0U
-#define  OBQULP0PARERR     0x00000400U
-#define  OBQULP1PARERR     0x00000200U
-#define  OBQULP2PARERR     0x00000100U
-#define  OBQULP3PARERR     0x00000080U
-#define  OBQSGEPARERR      0x00000040U
-#define  OBQNCSIPARERR     0x00000020U
-#define  PREFDROPINT       0x00000002U
-#define  UPACCNONZERO      0x00000001U
-
-#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
-#define  EEPROMWRINT      0x40000000U
-#define  TIMEOUTMAINT     0x20000000U
-#define  TIMEOUTINT       0x10000000U
-#define  RSPOVRLOOKUPINT  0x08000000U
-#define  REQOVRLOOKUPINT  0x04000000U
-#define  BLKWRPLINT       0x02000000U
-#define  BLKRDPLINT       0x01000000U
-#define  SGLWRPLINT       0x00800000U
-#define  SGLRDPLINT       0x00400000U
-#define  BLKWRCTLINT      0x00200000U
-#define  BLKRDCTLINT      0x00100000U
-#define  SGLWRCTLINT      0x00080000U
-#define  SGLRDCTLINT      0x00040000U
-#define  BLKWREEPROMINT   0x00020000U
-#define  BLKRDEEPROMINT   0x00010000U
-#define  SGLWREEPROMINT   0x00008000U
-#define  SGLRDEEPROMINT   0x00004000U
-#define  BLKWRFLASHINT    0x00002000U
-#define  BLKRDFLASHINT    0x00001000U
-#define  SGLWRFLASHINT    0x00000800U
-#define  SGLRDFLASHINT    0x00000400U
-#define  BLKWRBOOTINT     0x00000200U
-#define  BLKRDBOOTINT     0x00000100U
-#define  SGLWRBOOTINT     0x00000080U
-#define  SGLRDBOOTINT     0x00000040U
-#define  ILLWRBEINT       0x00000020U
-#define  ILLRDBEINT       0x00000010U
-#define  ILLRDINT         0x00000008U
-#define  ILLWRINT         0x00000004U
-#define  ILLTRANSINT      0x00000002U
-#define  RSVDSPACEINT     0x00000001U
-
-#define TP_OUT_CONFIG 0x7d04
-#define  VLANEXTENABLE_MASK  0x0000f000U
-#define  VLANEXTENABLE_SHIFT 12
-
-#define TP_GLOBAL_CONFIG 0x7d08
-#define  FIVETUPLELOOKUP_SHIFT  17
-#define  FIVETUPLELOOKUP_MASK   0x00060000U
-#define  FIVETUPLELOOKUP(x)     ((x) << FIVETUPLELOOKUP_SHIFT)
-#define  FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \
-                               FIVETUPLELOOKUP_SHIFT)
-
-#define TP_PARA_REG2 0x7d68
-#define  MAXRXDATA_MASK    0xffff0000U
-#define  MAXRXDATA_SHIFT   16
-#define  MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
-
-#define TP_TIMER_RESOLUTION 0x7d90
-#define  TIMERRESOLUTION_MASK   0x00ff0000U
-#define  TIMERRESOLUTION_SHIFT  16
-#define  TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
-#define  DELAYEDACKRESOLUTION_MASK 0x000000ffU
-#define  DELAYEDACKRESOLUTION_SHIFT     0
-#define  DELAYEDACKRESOLUTION_GET(x) \
-       (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT)
-
-#define TP_SHIFT_CNT 0x7dc0
-#define  SYNSHIFTMAX_SHIFT         24
-#define  SYNSHIFTMAX_MASK          0xff000000U
-#define  SYNSHIFTMAX(x)            ((x) << SYNSHIFTMAX_SHIFT)
-#define  SYNSHIFTMAX_GET(x)        (((x) & SYNSHIFTMAX_MASK) >> \
-                                  SYNSHIFTMAX_SHIFT)
-#define  RXTSHIFTMAXR1_SHIFT       20
-#define  RXTSHIFTMAXR1_MASK        0x00f00000U
-#define  RXTSHIFTMAXR1(x)          ((x) << RXTSHIFTMAXR1_SHIFT)
-#define  RXTSHIFTMAXR1_GET(x)      (((x) & RXTSHIFTMAXR1_MASK) >> \
-                                  RXTSHIFTMAXR1_SHIFT)
-#define  RXTSHIFTMAXR2_SHIFT       16
-#define  RXTSHIFTMAXR2_MASK        0x000f0000U
-#define  RXTSHIFTMAXR2(x)          ((x) << RXTSHIFTMAXR2_SHIFT)
-#define  RXTSHIFTMAXR2_GET(x)      (((x) & RXTSHIFTMAXR2_MASK) >> \
-                                  RXTSHIFTMAXR2_SHIFT)
-#define  PERSHIFTBACKOFFMAX_SHIFT  12
-#define  PERSHIFTBACKOFFMAX_MASK   0x0000f000U
-#define  PERSHIFTBACKOFFMAX(x)     ((x) << PERSHIFTBACKOFFMAX_SHIFT)
-#define  PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \
-                                  PERSHIFTBACKOFFMAX_SHIFT)
-#define  PERSHIFTMAX_SHIFT         8
-#define  PERSHIFTMAX_MASK          0x00000f00U
-#define  PERSHIFTMAX(x)            ((x) << PERSHIFTMAX_SHIFT)
-#define  PERSHIFTMAX_GET(x)        (((x) & PERSHIFTMAX_MASK) >> \
-                                  PERSHIFTMAX_SHIFT)
-#define  KEEPALIVEMAXR1_SHIFT      4
-#define  KEEPALIVEMAXR1_MASK       0x000000f0U
-#define  KEEPALIVEMAXR1(x)         ((x) << KEEPALIVEMAXR1_SHIFT)
-#define  KEEPALIVEMAXR1_GET(x)     (((x) & KEEPALIVEMAXR1_MASK) >> \
-                                  KEEPALIVEMAXR1_SHIFT)
-#define KEEPALIVEMAXR2_SHIFT       0
-#define KEEPALIVEMAXR2_MASK        0x0000000fU
-#define KEEPALIVEMAXR2(x)          ((x) << KEEPALIVEMAXR2_SHIFT)
-#define KEEPALIVEMAXR2_GET(x)      (((x) & KEEPALIVEMAXR2_MASK) >> \
-                                  KEEPALIVEMAXR2_SHIFT)
-
-#define TP_CCTRL_TABLE 0x7ddc
-#define TP_MTU_TABLE 0x7de4
-#define  MTUINDEX_MASK   0xff000000U
-#define  MTUINDEX_SHIFT  24
-#define  MTUINDEX(x)     ((x) << MTUINDEX_SHIFT)
-#define  MTUWIDTH_MASK   0x000f0000U
-#define  MTUWIDTH_SHIFT  16
-#define  MTUWIDTH(x)     ((x) << MTUWIDTH_SHIFT)
-#define  MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
-#define  MTUVALUE_MASK   0x00003fffU
-#define  MTUVALUE_SHIFT  0
-#define  MTUVALUE(x)     ((x) << MTUVALUE_SHIFT)
-#define  MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
-
-#define TP_RSS_LKP_TABLE 0x7dec
-#define  LKPTBLROWVLD        0x80000000U
-#define  LKPTBLQUEUE1_MASK   0x000ffc00U
-#define  LKPTBLQUEUE1_SHIFT  10
-#define  LKPTBLQUEUE1(x)     ((x) << LKPTBLQUEUE1_SHIFT)
-#define  LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
-#define  LKPTBLQUEUE0_MASK   0x000003ffU
-#define  LKPTBLQUEUE0_SHIFT  0
-#define  LKPTBLQUEUE0(x)     ((x) << LKPTBLQUEUE0_SHIFT)
-#define  LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
-
-#define TP_PIO_ADDR 0x7e40
-#define TP_PIO_DATA 0x7e44
-#define TP_MIB_INDEX 0x7e50
-#define TP_MIB_DATA 0x7e54
-#define TP_INT_CAUSE 0x7e74
-#define  FLMTXFLSTEMPTY 0x40000000U
-
-#define TP_VLAN_PRI_MAP 0x140
-#define  FRAGMENTATION_SHIFT 9
-#define  FRAGMENTATION_MASK  0x00000200U
-#define  MPSHITTYPE_MASK     0x00000100U
-#define  MACMATCH_MASK       0x00000080U
-#define  ETHERTYPE_MASK      0x00000040U
-#define  PROTOCOL_MASK       0x00000020U
-#define  TOS_MASK            0x00000010U
-#define  VLAN_MASK           0x00000008U
-#define  VNIC_ID_MASK        0x00000004U
-#define  PORT_MASK           0x00000002U
-#define  FCOE_SHIFT          0
-#define  FCOE_MASK           0x00000001U
-
-#define TP_INGRESS_CONFIG 0x141
-#define  VNIC                0x00000800U
-#define  CSUM_HAS_PSEUDO_HDR 0x00000400U
-#define  RM_OVLAN            0x00000200U
-#define  LOOKUPEVERYPKT      0x00000100U
-
-#define TP_MIB_MAC_IN_ERR_0 0x0
-#define TP_MIB_TCP_OUT_RST 0xc
-#define TP_MIB_TCP_IN_SEG_HI 0x10
-#define TP_MIB_TCP_IN_SEG_LO 0x11
-#define TP_MIB_TCP_OUT_SEG_HI 0x12
-#define TP_MIB_TCP_OUT_SEG_LO 0x13
-#define TP_MIB_TCP_RXT_SEG_HI 0x14
-#define TP_MIB_TCP_RXT_SEG_LO 0x15
-#define TP_MIB_TNL_CNG_DROP_0 0x18
-#define TP_MIB_TCP_V6IN_ERR_0 0x28
-#define TP_MIB_TCP_V6OUT_RST 0x2c
-#define TP_MIB_OFD_ARP_DROP 0x36
-#define TP_MIB_TNL_DROP_0 0x44
-#define TP_MIB_OFD_VLN_DROP_0 0x58
-
-#define ULP_TX_INT_CAUSE 0x8dcc
-#define  PBL_BOUND_ERR_CH3 0x80000000U
-#define  PBL_BOUND_ERR_CH2 0x40000000U
-#define  PBL_BOUND_ERR_CH1 0x20000000U
-#define  PBL_BOUND_ERR_CH0 0x10000000U
-
-#define PM_RX_INT_CAUSE 0x8fdc
-#define  ZERO_E_CMD_ERROR     0x00400000U
-#define  PMRX_FRAMING_ERROR   0x003ffff0U
-#define  OCSPI_PAR_ERROR      0x00000008U
-#define  DB_OPTIONS_PAR_ERROR 0x00000004U
-#define  IESPI_PAR_ERROR      0x00000002U
-#define  E_PCMD_PAR_ERROR     0x00000001U
-
-#define PM_TX_INT_CAUSE 0x8ffc
-#define  PCMD_LEN_OVFL0     0x80000000U
-#define  PCMD_LEN_OVFL1     0x40000000U
-#define  PCMD_LEN_OVFL2     0x20000000U
-#define  ZERO_C_CMD_ERROR   0x10000000U
-#define  PMTX_FRAMING_ERROR 0x0ffffff0U
-#define  OESPI_PAR_ERROR    0x00000008U
-#define  ICSPI_PAR_ERROR    0x00000002U
-#define  C_PCMD_PAR_ERROR   0x00000001U
+#define MA_INT_CAUSE_A 0x77e0
+
+#define MEM_PERR_INT_CAUSE_S    1
+#define MEM_PERR_INT_CAUSE_V(x) ((x) << MEM_PERR_INT_CAUSE_S)
+#define MEM_PERR_INT_CAUSE_F    MEM_PERR_INT_CAUSE_V(1U)
+
+#define MEM_WRAP_INT_CAUSE_S    0
+#define MEM_WRAP_INT_CAUSE_V(x) ((x) << MEM_WRAP_INT_CAUSE_S)
+#define MEM_WRAP_INT_CAUSE_F    MEM_WRAP_INT_CAUSE_V(1U)
+
+#define MA_INT_WRAP_STATUS_A   0x77e4
+
+#define MEM_WRAP_ADDRESS_S    4
+#define MEM_WRAP_ADDRESS_M    0xfffffffU
+#define MEM_WRAP_ADDRESS_G(x) (((x) >> MEM_WRAP_ADDRESS_S) & MEM_WRAP_ADDRESS_M)
+
+#define MEM_WRAP_CLIENT_NUM_S    0
+#define MEM_WRAP_CLIENT_NUM_M    0xfU
+#define MEM_WRAP_CLIENT_NUM_G(x) \
+       (((x) >> MEM_WRAP_CLIENT_NUM_S) & MEM_WRAP_CLIENT_NUM_M)
+
+#define MA_PARITY_ERROR_STATUS_A       0x77f4
+#define MA_PARITY_ERROR_STATUS1_A      0x77f4
+#define MA_PARITY_ERROR_STATUS2_A      0x7804
+
+/* registers for module EDC_0 */
+#define EDC_0_BASE_ADDR                0x7900
+
+#define EDC_BIST_CMD_A         0x7904
+#define EDC_BIST_CMD_ADDR_A    0x7908
+#define EDC_BIST_CMD_LEN_A     0x790c
+#define EDC_BIST_DATA_PATTERN_A 0x7910
+#define EDC_BIST_STATUS_RDATA_A        0x7928
+#define EDC_INT_CAUSE_A                0x7978
+
+#define ECC_UE_PAR_S    5
+#define ECC_UE_PAR_V(x) ((x) << ECC_UE_PAR_S)
+#define ECC_UE_PAR_F    ECC_UE_PAR_V(1U)
+
+#define ECC_CE_PAR_S    4
+#define ECC_CE_PAR_V(x) ((x) << ECC_CE_PAR_S)
+#define ECC_CE_PAR_F    ECC_CE_PAR_V(1U)
+
+#define PERR_PAR_CAUSE_S    3
+#define PERR_PAR_CAUSE_V(x) ((x) << PERR_PAR_CAUSE_S)
+#define PERR_PAR_CAUSE_F    PERR_PAR_CAUSE_V(1U)
+
+#define EDC_ECC_STATUS_A       0x797c
+
+/* registers for module EDC_1 */
+#define EDC_1_BASE_ADDR        0x7980
+
+/* registers for module CIM */
+#define CIM_BOOT_CFG_A 0x7b00
+#define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
+
+#define  BOOTADDR_M    0xffffff00U
+
+#define UPCRST_S    0
+#define UPCRST_V(x) ((x) << UPCRST_S)
+#define UPCRST_F    UPCRST_V(1U)
+
+#define CIM_PF_MAILBOX_DATA_A 0x240
+#define CIM_PF_MAILBOX_CTRL_A 0x280
+
+#define MBMSGVALID_S    3
+#define MBMSGVALID_V(x) ((x) << MBMSGVALID_S)
+#define MBMSGVALID_F    MBMSGVALID_V(1U)
+
+#define MBINTREQ_S    2
+#define MBINTREQ_V(x) ((x) << MBINTREQ_S)
+#define MBINTREQ_F    MBINTREQ_V(1U)
+
+#define MBOWNER_S    0
+#define MBOWNER_M    0x3U
+#define MBOWNER_V(x) ((x) << MBOWNER_S)
+#define MBOWNER_G(x) (((x) >> MBOWNER_S) & MBOWNER_M)
+
+#define CIM_PF_HOST_INT_ENABLE_A 0x288
+
+#define MBMSGRDYINTEN_S    19
+#define MBMSGRDYINTEN_V(x) ((x) << MBMSGRDYINTEN_S)
+#define MBMSGRDYINTEN_F    MBMSGRDYINTEN_V(1U)
+
+#define CIM_PF_HOST_INT_CAUSE_A 0x28c
+
+#define MBMSGRDYINT_S    19
+#define MBMSGRDYINT_V(x) ((x) << MBMSGRDYINT_S)
+#define MBMSGRDYINT_F    MBMSGRDYINT_V(1U)
+
+#define CIM_HOST_INT_CAUSE_A 0x7b2c
+
+#define TIEQOUTPARERRINT_S    20
+#define TIEQOUTPARERRINT_V(x) ((x) << TIEQOUTPARERRINT_S)
+#define TIEQOUTPARERRINT_F    TIEQOUTPARERRINT_V(1U)
+
+#define TIEQINPARERRINT_S    19
+#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
+#define TIEQINPARERRINT_F    TIEQINPARERRINT_V(1U)
+
+#define PREFDROPINT_S    1
+#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
+#define PREFDROPINT_F    PREFDROPINT_V(1U)
+
+#define UPACCNONZERO_S    0
+#define UPACCNONZERO_V(x) ((x) << UPACCNONZERO_S)
+#define UPACCNONZERO_F    UPACCNONZERO_V(1U)
+
+#define MBHOSTPARERR_S    18
+#define MBHOSTPARERR_V(x) ((x) << MBHOSTPARERR_S)
+#define MBHOSTPARERR_F    MBHOSTPARERR_V(1U)
+
+#define MBUPPARERR_S    17
+#define MBUPPARERR_V(x) ((x) << MBUPPARERR_S)
+#define MBUPPARERR_F    MBUPPARERR_V(1U)
+
+#define IBQTP0PARERR_S    16
+#define IBQTP0PARERR_V(x) ((x) << IBQTP0PARERR_S)
+#define IBQTP0PARERR_F    IBQTP0PARERR_V(1U)
+
+#define IBQTP1PARERR_S    15
+#define IBQTP1PARERR_V(x) ((x) << IBQTP1PARERR_S)
+#define IBQTP1PARERR_F    IBQTP1PARERR_V(1U)
+
+#define IBQULPPARERR_S    14
+#define IBQULPPARERR_V(x) ((x) << IBQULPPARERR_S)
+#define IBQULPPARERR_F    IBQULPPARERR_V(1U)
+
+#define IBQSGELOPARERR_S    13
+#define IBQSGELOPARERR_V(x) ((x) << IBQSGELOPARERR_S)
+#define IBQSGELOPARERR_F    IBQSGELOPARERR_V(1U)
+
+#define IBQSGEHIPARERR_S    12
+#define IBQSGEHIPARERR_V(x) ((x) << IBQSGEHIPARERR_S)
+#define IBQSGEHIPARERR_F    IBQSGEHIPARERR_V(1U)
+
+#define IBQNCSIPARERR_S    11
+#define IBQNCSIPARERR_V(x) ((x) << IBQNCSIPARERR_S)
+#define IBQNCSIPARERR_F    IBQNCSIPARERR_V(1U)
+
+#define OBQULP0PARERR_S    10
+#define OBQULP0PARERR_V(x) ((x) << OBQULP0PARERR_S)
+#define OBQULP0PARERR_F    OBQULP0PARERR_V(1U)
+
+#define OBQULP1PARERR_S    9
+#define OBQULP1PARERR_V(x) ((x) << OBQULP1PARERR_S)
+#define OBQULP1PARERR_F    OBQULP1PARERR_V(1U)
+
+#define OBQULP2PARERR_S    8
+#define OBQULP2PARERR_V(x) ((x) << OBQULP2PARERR_S)
+#define OBQULP2PARERR_F    OBQULP2PARERR_V(1U)
+
+#define OBQULP3PARERR_S    7
+#define OBQULP3PARERR_V(x) ((x) << OBQULP3PARERR_S)
+#define OBQULP3PARERR_F    OBQULP3PARERR_V(1U)
+
+#define OBQSGEPARERR_S    6
+#define OBQSGEPARERR_V(x) ((x) << OBQSGEPARERR_S)
+#define OBQSGEPARERR_F    OBQSGEPARERR_V(1U)
+
+#define OBQNCSIPARERR_S    5
+#define OBQNCSIPARERR_V(x) ((x) << OBQNCSIPARERR_S)
+#define OBQNCSIPARERR_F    OBQNCSIPARERR_V(1U)
+
+#define CIM_HOST_UPACC_INT_CAUSE_A 0x7b34
+
+#define EEPROMWRINT_S    30
+#define EEPROMWRINT_V(x) ((x) << EEPROMWRINT_S)
+#define EEPROMWRINT_F    EEPROMWRINT_V(1U)
+
+#define TIMEOUTMAINT_S    29
+#define TIMEOUTMAINT_V(x) ((x) << TIMEOUTMAINT_S)
+#define TIMEOUTMAINT_F    TIMEOUTMAINT_V(1U)
+
+#define TIMEOUTINT_S    28
+#define TIMEOUTINT_V(x) ((x) << TIMEOUTINT_S)
+#define TIMEOUTINT_F    TIMEOUTINT_V(1U)
+
+#define RSPOVRLOOKUPINT_S    27
+#define RSPOVRLOOKUPINT_V(x) ((x) << RSPOVRLOOKUPINT_S)
+#define RSPOVRLOOKUPINT_F    RSPOVRLOOKUPINT_V(1U)
+
+#define REQOVRLOOKUPINT_S    26
+#define REQOVRLOOKUPINT_V(x) ((x) << REQOVRLOOKUPINT_S)
+#define REQOVRLOOKUPINT_F    REQOVRLOOKUPINT_V(1U)
+
+#define BLKWRPLINT_S    25
+#define BLKWRPLINT_V(x) ((x) << BLKWRPLINT_S)
+#define BLKWRPLINT_F    BLKWRPLINT_V(1U)
+
+#define BLKRDPLINT_S    24
+#define BLKRDPLINT_V(x) ((x) << BLKRDPLINT_S)
+#define BLKRDPLINT_F    BLKRDPLINT_V(1U)
+
+#define SGLWRPLINT_S    23
+#define SGLWRPLINT_V(x) ((x) << SGLWRPLINT_S)
+#define SGLWRPLINT_F    SGLWRPLINT_V(1U)
+
+#define SGLRDPLINT_S    22
+#define SGLRDPLINT_V(x) ((x) << SGLRDPLINT_S)
+#define SGLRDPLINT_F    SGLRDPLINT_V(1U)
+
+#define BLKWRCTLINT_S    21
+#define BLKWRCTLINT_V(x) ((x) << BLKWRCTLINT_S)
+#define BLKWRCTLINT_F    BLKWRCTLINT_V(1U)
+
+#define BLKRDCTLINT_S    20
+#define BLKRDCTLINT_V(x) ((x) << BLKRDCTLINT_S)
+#define BLKRDCTLINT_F    BLKRDCTLINT_V(1U)
+
+#define SGLWRCTLINT_S    19
+#define SGLWRCTLINT_V(x) ((x) << SGLWRCTLINT_S)
+#define SGLWRCTLINT_F    SGLWRCTLINT_V(1U)
+
+#define SGLRDCTLINT_S    18
+#define SGLRDCTLINT_V(x) ((x) << SGLRDCTLINT_S)
+#define SGLRDCTLINT_F    SGLRDCTLINT_V(1U)
+
+#define BLKWREEPROMINT_S    17
+#define BLKWREEPROMINT_V(x) ((x) << BLKWREEPROMINT_S)
+#define BLKWREEPROMINT_F    BLKWREEPROMINT_V(1U)
+
+#define BLKRDEEPROMINT_S    16
+#define BLKRDEEPROMINT_V(x) ((x) << BLKRDEEPROMINT_S)
+#define BLKRDEEPROMINT_F    BLKRDEEPROMINT_V(1U)
+
+#define SGLWREEPROMINT_S    15
+#define SGLWREEPROMINT_V(x) ((x) << SGLWREEPROMINT_S)
+#define SGLWREEPROMINT_F    SGLWREEPROMINT_V(1U)
+
+#define SGLRDEEPROMINT_S    14
+#define SGLRDEEPROMINT_V(x) ((x) << SGLRDEEPROMINT_S)
+#define SGLRDEEPROMINT_F    SGLRDEEPROMINT_V(1U)
+
+#define BLKWRFLASHINT_S    13
+#define BLKWRFLASHINT_V(x) ((x) << BLKWRFLASHINT_S)
+#define BLKWRFLASHINT_F    BLKWRFLASHINT_V(1U)
+
+#define BLKRDFLASHINT_S    12
+#define BLKRDFLASHINT_V(x) ((x) << BLKRDFLASHINT_S)
+#define BLKRDFLASHINT_F    BLKRDFLASHINT_V(1U)
+
+#define SGLWRFLASHINT_S    11
+#define SGLWRFLASHINT_V(x) ((x) << SGLWRFLASHINT_S)
+#define SGLWRFLASHINT_F    SGLWRFLASHINT_V(1U)
+
+#define SGLRDFLASHINT_S    10
+#define SGLRDFLASHINT_V(x) ((x) << SGLRDFLASHINT_S)
+#define SGLRDFLASHINT_F    SGLRDFLASHINT_V(1U)
+
+#define BLKWRBOOTINT_S    9
+#define BLKWRBOOTINT_V(x) ((x) << BLKWRBOOTINT_S)
+#define BLKWRBOOTINT_F    BLKWRBOOTINT_V(1U)
+
+#define BLKRDBOOTINT_S    8
+#define BLKRDBOOTINT_V(x) ((x) << BLKRDBOOTINT_S)
+#define BLKRDBOOTINT_F    BLKRDBOOTINT_V(1U)
+
+#define SGLWRBOOTINT_S    7
+#define SGLWRBOOTINT_V(x) ((x) << SGLWRBOOTINT_S)
+#define SGLWRBOOTINT_F    SGLWRBOOTINT_V(1U)
+
+#define SGLRDBOOTINT_S    6
+#define SGLRDBOOTINT_V(x) ((x) << SGLRDBOOTINT_S)
+#define SGLRDBOOTINT_F    SGLRDBOOTINT_V(1U)
+
+#define ILLWRBEINT_S    5
+#define ILLWRBEINT_V(x) ((x) << ILLWRBEINT_S)
+#define ILLWRBEINT_F    ILLWRBEINT_V(1U)
+
+#define ILLRDBEINT_S    4
+#define ILLRDBEINT_V(x) ((x) << ILLRDBEINT_S)
+#define ILLRDBEINT_F    ILLRDBEINT_V(1U)
+
+#define ILLRDINT_S    3
+#define ILLRDINT_V(x) ((x) << ILLRDINT_S)
+#define ILLRDINT_F    ILLRDINT_V(1U)
+
+#define ILLWRINT_S    2
+#define ILLWRINT_V(x) ((x) << ILLWRINT_S)
+#define ILLWRINT_F    ILLWRINT_V(1U)
+
+#define ILLTRANSINT_S    1
+#define ILLTRANSINT_V(x) ((x) << ILLTRANSINT_S)
+#define ILLTRANSINT_F    ILLTRANSINT_V(1U)
+
+#define RSVDSPACEINT_S    0
+#define RSVDSPACEINT_V(x) ((x) << RSVDSPACEINT_S)
+#define RSVDSPACEINT_F    RSVDSPACEINT_V(1U)
+
+/* registers for module TP */
+#define DBGLAWHLF_S    23
+#define DBGLAWHLF_V(x) ((x) << DBGLAWHLF_S)
+#define DBGLAWHLF_F    DBGLAWHLF_V(1U)
+
+#define DBGLAWPTR_S    16
+#define DBGLAWPTR_M    0x7fU
+#define DBGLAWPTR_G(x) (((x) >> DBGLAWPTR_S) & DBGLAWPTR_M)
+
+#define DBGLAENABLE_S    12
+#define DBGLAENABLE_V(x) ((x) << DBGLAENABLE_S)
+#define DBGLAENABLE_F    DBGLAENABLE_V(1U)
+
+#define DBGLARPTR_S    0
+#define DBGLARPTR_M    0x7fU
+#define DBGLARPTR_V(x) ((x) << DBGLARPTR_S)
+
+#define TP_DBG_LA_DATAL_A      0x7ed8
+#define TP_DBG_LA_CONFIG_A     0x7ed4
+#define TP_OUT_CONFIG_A                0x7d04
+#define TP_GLOBAL_CONFIG_A     0x7d08
+
+#define DBGLAMODE_S    14
+#define DBGLAMODE_M    0x3U
+#define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M)
+
+#define FIVETUPLELOOKUP_S    17
+#define FIVETUPLELOOKUP_M    0x3U
+#define FIVETUPLELOOKUP_V(x) ((x) << FIVETUPLELOOKUP_S)
+#define FIVETUPLELOOKUP_G(x) (((x) >> FIVETUPLELOOKUP_S) & FIVETUPLELOOKUP_M)
+
+#define TP_PARA_REG2_A 0x7d68
+
+#define MAXRXDATA_S    16
+#define MAXRXDATA_M    0xffffU
+#define MAXRXDATA_G(x) (((x) >> MAXRXDATA_S) & MAXRXDATA_M)
+
+#define TP_TIMER_RESOLUTION_A 0x7d90
+
+#define TIMERRESOLUTION_S    16
+#define TIMERRESOLUTION_M    0xffU
+#define TIMERRESOLUTION_G(x) (((x) >> TIMERRESOLUTION_S) & TIMERRESOLUTION_M)
+
+#define TIMESTAMPRESOLUTION_S    8
+#define TIMESTAMPRESOLUTION_M    0xffU
+#define TIMESTAMPRESOLUTION_G(x) \
+       (((x) >> TIMESTAMPRESOLUTION_S) & TIMESTAMPRESOLUTION_M)
+
+#define DELAYEDACKRESOLUTION_S    0
+#define DELAYEDACKRESOLUTION_M    0xffU
+#define DELAYEDACKRESOLUTION_G(x) \
+       (((x) >> DELAYEDACKRESOLUTION_S) & DELAYEDACKRESOLUTION_M)
+
+#define TP_SHIFT_CNT_A 0x7dc0
+#define TP_RXT_MIN_A 0x7d98
+#define TP_RXT_MAX_A 0x7d9c
+#define TP_PERS_MIN_A 0x7da0
+#define TP_PERS_MAX_A 0x7da4
+#define TP_KEEP_IDLE_A 0x7da8
+#define TP_KEEP_INTVL_A 0x7dac
+#define TP_INIT_SRTT_A 0x7db0
+#define TP_DACK_TIMER_A 0x7db4
+#define TP_FINWAIT2_TIMER_A 0x7db8
+
+#define INITSRTT_S    0
+#define INITSRTT_M    0xffffU
+#define INITSRTT_G(x) (((x) >> INITSRTT_S) & INITSRTT_M)
+
+#define PERSMAX_S    0
+#define PERSMAX_M    0x3fffffffU
+#define PERSMAX_V(x) ((x) << PERSMAX_S)
+#define PERSMAX_G(x) (((x) >> PERSMAX_S) & PERSMAX_M)
+
+#define SYNSHIFTMAX_S    24
+#define SYNSHIFTMAX_M    0xffU
+#define SYNSHIFTMAX_V(x) ((x) << SYNSHIFTMAX_S)
+#define SYNSHIFTMAX_G(x) (((x) >> SYNSHIFTMAX_S) & SYNSHIFTMAX_M)
+
+#define RXTSHIFTMAXR1_S    20
+#define RXTSHIFTMAXR1_M    0xfU
+#define RXTSHIFTMAXR1_V(x) ((x) << RXTSHIFTMAXR1_S)
+#define RXTSHIFTMAXR1_G(x) (((x) >> RXTSHIFTMAXR1_S) & RXTSHIFTMAXR1_M)
+
+#define RXTSHIFTMAXR2_S    16
+#define RXTSHIFTMAXR2_M    0xfU
+#define RXTSHIFTMAXR2_V(x) ((x) << RXTSHIFTMAXR2_S)
+#define RXTSHIFTMAXR2_G(x) (((x) >> RXTSHIFTMAXR2_S) & RXTSHIFTMAXR2_M)
+
+#define PERSHIFTBACKOFFMAX_S    12
+#define PERSHIFTBACKOFFMAX_M    0xfU
+#define PERSHIFTBACKOFFMAX_V(x) ((x) << PERSHIFTBACKOFFMAX_S)
+#define PERSHIFTBACKOFFMAX_G(x) \
+       (((x) >> PERSHIFTBACKOFFMAX_S) & PERSHIFTBACKOFFMAX_M)
+
+#define PERSHIFTMAX_S    8
+#define PERSHIFTMAX_M    0xfU
+#define PERSHIFTMAX_V(x) ((x) << PERSHIFTMAX_S)
+#define PERSHIFTMAX_G(x) (((x) >> PERSHIFTMAX_S) & PERSHIFTMAX_M)
+
+#define KEEPALIVEMAXR1_S    4
+#define KEEPALIVEMAXR1_M    0xfU
+#define KEEPALIVEMAXR1_V(x) ((x) << KEEPALIVEMAXR1_S)
+#define KEEPALIVEMAXR1_G(x) (((x) >> KEEPALIVEMAXR1_S) & KEEPALIVEMAXR1_M)
+
+#define KEEPALIVEMAXR2_S    0
+#define KEEPALIVEMAXR2_M    0xfU
+#define KEEPALIVEMAXR2_V(x) ((x) << KEEPALIVEMAXR2_S)
+#define KEEPALIVEMAXR2_G(x) (((x) >> KEEPALIVEMAXR2_S) & KEEPALIVEMAXR2_M)
+
+#define ROWINDEX_S    16
+#define ROWINDEX_V(x) ((x) << ROWINDEX_S)
+
+#define TP_CCTRL_TABLE_A       0x7ddc
+#define TP_MTU_TABLE_A         0x7de4
+
+#define MTUINDEX_S    24
+#define MTUINDEX_V(x) ((x) << MTUINDEX_S)
+
+#define MTUWIDTH_S    16
+#define MTUWIDTH_M    0xfU
+#define MTUWIDTH_V(x) ((x) << MTUWIDTH_S)
+#define MTUWIDTH_G(x) (((x) >> MTUWIDTH_S) & MTUWIDTH_M)
+
+#define MTUVALUE_S    0
+#define MTUVALUE_M    0x3fffU
+#define MTUVALUE_V(x) ((x) << MTUVALUE_S)
+#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
+
+#define TP_RSS_LKP_TABLE_A     0x7dec
+
+#define LKPTBLROWVLD_S    31
+#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
+#define LKPTBLROWVLD_F    LKPTBLROWVLD_V(1U)
+
+#define LKPTBLQUEUE1_S    10
+#define LKPTBLQUEUE1_M    0x3ffU
+#define LKPTBLQUEUE1_G(x) (((x) >> LKPTBLQUEUE1_S) & LKPTBLQUEUE1_M)
+
+#define LKPTBLQUEUE0_S    0
+#define LKPTBLQUEUE0_M    0x3ffU
+#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M)
+
+#define TP_PIO_ADDR_A  0x7e40
+#define TP_PIO_DATA_A  0x7e44
+#define TP_MIB_INDEX_A 0x7e50
+#define TP_MIB_DATA_A  0x7e54
+#define TP_INT_CAUSE_A 0x7e74
+
+#define FLMTXFLSTEMPTY_S    30
+#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
+#define FLMTXFLSTEMPTY_F    FLMTXFLSTEMPTY_V(1U)
+
+#define TP_VLAN_PRI_MAP_A 0x140
+
+#define FRAGMENTATION_S    9
+#define FRAGMENTATION_V(x) ((x) << FRAGMENTATION_S)
+#define FRAGMENTATION_F    FRAGMENTATION_V(1U)
+
+#define MPSHITTYPE_S    8
+#define MPSHITTYPE_V(x) ((x) << MPSHITTYPE_S)
+#define MPSHITTYPE_F    MPSHITTYPE_V(1U)
+
+#define MACMATCH_S    7
+#define MACMATCH_V(x) ((x) << MACMATCH_S)
+#define MACMATCH_F    MACMATCH_V(1U)
+
+#define ETHERTYPE_S    6
+#define ETHERTYPE_V(x) ((x) << ETHERTYPE_S)
+#define ETHERTYPE_F    ETHERTYPE_V(1U)
+
+#define PROTOCOL_S    5
+#define PROTOCOL_V(x) ((x) << PROTOCOL_S)
+#define PROTOCOL_F    PROTOCOL_V(1U)
+
+#define TOS_S    4
+#define TOS_V(x) ((x) << TOS_S)
+#define TOS_F    TOS_V(1U)
+
+#define VLAN_S    3
+#define VLAN_V(x) ((x) << VLAN_S)
+#define VLAN_F    VLAN_V(1U)
+
+#define VNIC_ID_S    2
+#define VNIC_ID_V(x) ((x) << VNIC_ID_S)
+#define VNIC_ID_F    VNIC_ID_V(1U)
+
+#define PORT_S    1
+#define PORT_V(x) ((x) << PORT_S)
+#define PORT_F    PORT_V(1U)
+
+#define FCOE_S    0
+#define FCOE_V(x) ((x) << FCOE_S)
+#define FCOE_F    FCOE_V(1U)
+
+#define FILTERMODE_S    15
+#define FILTERMODE_V(x) ((x) << FILTERMODE_S)
+#define FILTERMODE_F    FILTERMODE_V(1U)
+
+#define FCOEMASK_S    14
+#define FCOEMASK_V(x) ((x) << FCOEMASK_S)
+#define FCOEMASK_F    FCOEMASK_V(1U)
+
+#define TP_INGRESS_CONFIG_A    0x141
+
+#define VNIC_S    11
+#define VNIC_V(x) ((x) << VNIC_S)
+#define VNIC_F    VNIC_V(1U)
+
+#define CSUM_HAS_PSEUDO_HDR_S    10
+#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
+#define CSUM_HAS_PSEUDO_HDR_F    CSUM_HAS_PSEUDO_HDR_V(1U)
+
+#define TP_MIB_MAC_IN_ERR_0_A  0x0
+#define TP_MIB_TCP_OUT_RST_A   0xc
+#define TP_MIB_TCP_IN_SEG_HI_A 0x10
+#define TP_MIB_TCP_IN_SEG_LO_A 0x11
+#define TP_MIB_TCP_OUT_SEG_HI_A        0x12
+#define TP_MIB_TCP_OUT_SEG_LO_A 0x13
+#define TP_MIB_TCP_RXT_SEG_HI_A        0x14
+#define TP_MIB_TCP_RXT_SEG_LO_A        0x15
+#define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
+#define TP_MIB_TCP_V6OUT_RST_A 0x2c
+#define TP_MIB_OFD_ARP_DROP_A  0x36
+#define TP_MIB_TNL_DROP_0_A    0x44
+#define TP_MIB_OFD_VLN_DROP_0_A        0x58
+
+#define ULP_TX_INT_CAUSE_A     0x8dcc
+
+#define PBL_BOUND_ERR_CH3_S    31
+#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
+#define PBL_BOUND_ERR_CH3_F    PBL_BOUND_ERR_CH3_V(1U)
+
+#define PBL_BOUND_ERR_CH2_S    30
+#define PBL_BOUND_ERR_CH2_V(x) ((x) << PBL_BOUND_ERR_CH2_S)
+#define PBL_BOUND_ERR_CH2_F    PBL_BOUND_ERR_CH2_V(1U)
+
+#define PBL_BOUND_ERR_CH1_S    29
+#define PBL_BOUND_ERR_CH1_V(x) ((x) << PBL_BOUND_ERR_CH1_S)
+#define PBL_BOUND_ERR_CH1_F    PBL_BOUND_ERR_CH1_V(1U)
+
+#define PBL_BOUND_ERR_CH0_S    28
+#define PBL_BOUND_ERR_CH0_V(x) ((x) << PBL_BOUND_ERR_CH0_S)
+#define PBL_BOUND_ERR_CH0_F    PBL_BOUND_ERR_CH0_V(1U)
+
+#define PM_RX_INT_CAUSE_A      0x8fdc
+#define PM_RX_STAT_CONFIG_A 0x8fc8
+#define PM_RX_STAT_COUNT_A 0x8fcc
+#define PM_RX_STAT_LSB_A 0x8fd0
+#define PM_RX_DBG_CTRL_A 0x8fd0
+#define PM_RX_DBG_DATA_A 0x8fd4
+#define PM_RX_DBG_STAT_MSB_A 0x10013
+
+#define PMRX_FRAMING_ERROR_F   0x003ffff0U
+
+#define ZERO_E_CMD_ERROR_S    22
+#define ZERO_E_CMD_ERROR_V(x) ((x) << ZERO_E_CMD_ERROR_S)
+#define ZERO_E_CMD_ERROR_F    ZERO_E_CMD_ERROR_V(1U)
+
+#define OCSPI_PAR_ERROR_S    3
+#define OCSPI_PAR_ERROR_V(x) ((x) << OCSPI_PAR_ERROR_S)
+#define OCSPI_PAR_ERROR_F    OCSPI_PAR_ERROR_V(1U)
+
+#define DB_OPTIONS_PAR_ERROR_S    2
+#define DB_OPTIONS_PAR_ERROR_V(x) ((x) << DB_OPTIONS_PAR_ERROR_S)
+#define DB_OPTIONS_PAR_ERROR_F    DB_OPTIONS_PAR_ERROR_V(1U)
+
+#define IESPI_PAR_ERROR_S    1
+#define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S)
+#define IESPI_PAR_ERROR_F    IESPI_PAR_ERROR_V(1U)
+
+#define PMRX_E_PCMD_PAR_ERROR_S    0
+#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
+#define PMRX_E_PCMD_PAR_ERROR_F    PMRX_E_PCMD_PAR_ERROR_V(1U)
+
+#define PM_TX_INT_CAUSE_A      0x8ffc
+#define PM_TX_STAT_CONFIG_A 0x8fe8
+#define PM_TX_STAT_COUNT_A 0x8fec
+#define PM_TX_STAT_LSB_A 0x8ff0
+#define PM_TX_DBG_CTRL_A 0x8ff0
+#define PM_TX_DBG_DATA_A 0x8ff4
+#define PM_TX_DBG_STAT_MSB_A 0x1001a
+
+#define PCMD_LEN_OVFL0_S    31
+#define PCMD_LEN_OVFL0_V(x) ((x) << PCMD_LEN_OVFL0_S)
+#define PCMD_LEN_OVFL0_F    PCMD_LEN_OVFL0_V(1U)
+
+#define PCMD_LEN_OVFL1_S    30
+#define PCMD_LEN_OVFL1_V(x) ((x) << PCMD_LEN_OVFL1_S)
+#define PCMD_LEN_OVFL1_F    PCMD_LEN_OVFL1_V(1U)
+
+#define PCMD_LEN_OVFL2_S    29
+#define PCMD_LEN_OVFL2_V(x) ((x) << PCMD_LEN_OVFL2_S)
+#define PCMD_LEN_OVFL2_F    PCMD_LEN_OVFL2_V(1U)
+
+#define ZERO_C_CMD_ERROR_S    28
+#define ZERO_C_CMD_ERROR_V(x) ((x) << ZERO_C_CMD_ERROR_S)
+#define ZERO_C_CMD_ERROR_F    ZERO_C_CMD_ERROR_V(1U)
+
+#define  PMTX_FRAMING_ERROR_F 0x0ffffff0U
+
+#define OESPI_PAR_ERROR_S    3
+#define OESPI_PAR_ERROR_V(x) ((x) << OESPI_PAR_ERROR_S)
+#define OESPI_PAR_ERROR_F    OESPI_PAR_ERROR_V(1U)
+
+#define ICSPI_PAR_ERROR_S    1
+#define ICSPI_PAR_ERROR_V(x) ((x) << ICSPI_PAR_ERROR_S)
+#define ICSPI_PAR_ERROR_F    ICSPI_PAR_ERROR_V(1U)
+
+#define PMTX_C_PCMD_PAR_ERROR_S    0
+#define PMTX_C_PCMD_PAR_ERROR_V(x) ((x) << PMTX_C_PCMD_PAR_ERROR_S)
+#define PMTX_C_PCMD_PAR_ERROR_F    PMTX_C_PCMD_PAR_ERROR_V(1U)
 
 #define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
 #define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
 #define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
 #define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
 #define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
-#define MAC_PORT_CFG2 0x818
 #define MAC_PORT_MAGIC_MACID_LO 0x824
 #define MAC_PORT_MAGIC_MACID_HI 0x828
-#define MAC_PORT_EPIO_DATA0 0x8c0
-#define MAC_PORT_EPIO_DATA1 0x8c4
-#define MAC_PORT_EPIO_DATA2 0x8c8
-#define MAC_PORT_EPIO_DATA3 0x8cc
-#define MAC_PORT_EPIO_OP 0x8d0
-
-#define MPS_CMN_CTL 0x9000
-#define  NUMPORTS_MASK   0x00000003U
-#define  NUMPORTS_SHIFT  0
-#define  NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
-
-#define MPS_INT_CAUSE 0x9008
-#define  STATINT 0x00000020U
-#define  TXINT   0x00000010U
-#define  RXINT   0x00000008U
-#define  TRCINT  0x00000004U
-#define  CLSINT  0x00000002U
-#define  PLINT   0x00000001U
-
-#define MPS_TX_INT_CAUSE 0x9408
-#define  PORTERR    0x00010000U
-#define  FRMERR     0x00008000U
-#define  SECNTERR   0x00004000U
-#define  BUBBLE     0x00002000U
-#define  TXDESCFIFO 0x00001e00U
-#define  TXDATAFIFO 0x000001e0U
-#define  NCSIFIFO   0x00000010U
-#define  TPFIFO     0x0000000fU
-
-#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
-#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
-#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
+
+#define MAC_PORT_EPIO_DATA0_A 0x8c0
+#define MAC_PORT_EPIO_DATA1_A 0x8c4
+#define MAC_PORT_EPIO_DATA2_A 0x8c8
+#define MAC_PORT_EPIO_DATA3_A 0x8cc
+#define MAC_PORT_EPIO_OP_A 0x8d0
+
+#define MAC_PORT_CFG2_A 0x818
+
+#define MPS_CMN_CTL_A  0x9000
+
+#define NUMPORTS_S    0
+#define NUMPORTS_M    0x3U
+#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
+
+#define MPS_INT_CAUSE_A 0x9008
+#define MPS_TX_INT_CAUSE_A 0x9408
+
+#define FRMERR_S    15
+#define FRMERR_V(x) ((x) << FRMERR_S)
+#define FRMERR_F    FRMERR_V(1U)
+
+#define SECNTERR_S    14
+#define SECNTERR_V(x) ((x) << SECNTERR_S)
+#define SECNTERR_F    SECNTERR_V(1U)
+
+#define BUBBLE_S    13
+#define BUBBLE_V(x) ((x) << BUBBLE_S)
+#define BUBBLE_F    BUBBLE_V(1U)
+
+#define TXDESCFIFO_S    9
+#define TXDESCFIFO_M    0xfU
+#define TXDESCFIFO_V(x) ((x) << TXDESCFIFO_S)
+
+#define TXDATAFIFO_S    5
+#define TXDATAFIFO_M    0xfU
+#define TXDATAFIFO_V(x) ((x) << TXDATAFIFO_S)
+
+#define NCSIFIFO_S    4
+#define NCSIFIFO_V(x) ((x) << NCSIFIFO_S)
+#define NCSIFIFO_F    NCSIFIFO_V(1U)
+
+#define TPFIFO_S    0
+#define TPFIFO_M    0xfU
+#define TPFIFO_V(x) ((x) << TPFIFO_S)
+
+#define MPS_STAT_PERR_INT_CAUSE_SRAM_A         0x9614
+#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A      0x9620
+#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A      0x962c
 
 #define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
 #define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
 #define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
 #define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
 #define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
-#define MPS_TRC_CFG 0x9800
-#define  TRCFIFOEMPTY       0x00000010U
-#define  TRCIGNOREDROPINPUT 0x00000008U
-#define  TRCKEEPDUPLICATES  0x00000004U
-#define  TRCEN              0x00000002U
-#define  TRCMULTIFILTER     0x00000001U
-
-#define MPS_TRC_RSS_CONTROL 0x9808
-#define MPS_T5_TRC_RSS_CONTROL 0xa00c
-#define  RSSCONTROL_MASK    0x00ff0000U
-#define  RSSCONTROL_SHIFT   16
-#define  RSSCONTROL(x)      ((x) << RSSCONTROL_SHIFT)
-#define  QUEUENUMBER_MASK   0x0000ffffU
-#define  QUEUENUMBER_SHIFT  0
-#define  QUEUENUMBER(x)     ((x) << QUEUENUMBER_SHIFT)
-
-#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
-#define  TFINVERTMATCH   0x01000000U
-#define  TFPKTTOOLARGE   0x00800000U
-#define  TFEN            0x00400000U
-#define  TFPORT_MASK     0x003c0000U
-#define  TFPORT_SHIFT    18
-#define  TFPORT(x)       ((x) << TFPORT_SHIFT)
-#define  TFPORT_GET(x)   (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
-#define  TFDROP          0x00020000U
-#define  TFSOPEOPERR     0x00010000U
-#define  TFLENGTH_MASK   0x00001f00U
-#define  TFLENGTH_SHIFT  8
-#define  TFLENGTH(x)     ((x) << TFLENGTH_SHIFT)
-#define  TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
-#define  TFOFFSET_MASK   0x0000001fU
-#define  TFOFFSET_SHIFT  0
-#define  TFOFFSET(x)     ((x) << TFOFFSET_SHIFT)
-#define  TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
-
-#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
-#define  TFMINPKTSIZE_MASK   0x01ff0000U
-#define  TFMINPKTSIZE_SHIFT  16
-#define  TFMINPKTSIZE(x)     ((x) << TFMINPKTSIZE_SHIFT)
-#define  TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
-#define  TFCAPTUREMAX_MASK   0x00003fffU
-#define  TFCAPTUREMAX_SHIFT  0
-#define  TFCAPTUREMAX(x)     ((x) << TFCAPTUREMAX_SHIFT)
-#define  TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
-
-#define MPS_TRC_INT_CAUSE 0x985c
-#define  MISCPERR 0x00000100U
-#define  PKTFIFO  0x000000f0U
-#define  FILTMEM  0x0000000fU
-
-#define MPS_TRC_FILTER0_MATCH 0x9c00
-#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
-#define MPS_TRC_FILTER1_MATCH 0x9d00
-#define MPS_CLS_INT_CAUSE 0xd028
-#define  PLERRENB  0x00000008U
-#define  HASHSRAM  0x00000004U
-#define  MATCHTCAM 0x00000002U
-#define  MATCHSRAM 0x00000001U
-
-#define MPS_RX_PERR_INT_CAUSE 0x11074
-
-#define CPL_INTR_CAUSE 0x19054
-#define  CIM_OP_MAP_PERR   0x00000020U
-#define  CIM_OVFL_ERROR    0x00000010U
-#define  TP_FRAMING_ERROR  0x00000008U
-#define  SGE_FRAMING_ERROR 0x00000004U
-#define  CIM_FRAMING_ERROR 0x00000002U
-#define  ZERO_SWITCH_ERROR 0x00000001U
-
-#define SMB_INT_CAUSE 0x19090
-#define  MSTTXFIFOPARINT 0x00200000U
-#define  MSTRXFIFOPARINT 0x00100000U
-#define  SLVFIFOPARINT   0x00080000U
-
-#define ULP_RX_INT_CAUSE 0x19158
-#define ULP_RX_ISCSI_TAGMASK 0x19164
-#define ULP_RX_ISCSI_PSZ 0x19168
-#define  HPZ3_MASK   0x0f000000U
-#define  HPZ3_SHIFT  24
-#define  HPZ3(x)     ((x) << HPZ3_SHIFT)
-#define  HPZ2_MASK   0x000f0000U
-#define  HPZ2_SHIFT  16
-#define  HPZ2(x)     ((x) << HPZ2_SHIFT)
-#define  HPZ1_MASK   0x00000f00U
-#define  HPZ1_SHIFT  8
-#define  HPZ1(x)     ((x) << HPZ1_SHIFT)
-#define  HPZ0_MASK   0x0000000fU
-#define  HPZ0_SHIFT  0
-#define  HPZ0(x)     ((x) << HPZ0_SHIFT)
-
-#define ULP_RX_TDDP_PSZ 0x19178
-
-#define SF_DATA 0x193f8
-#define SF_OP 0x193fc
-#define  SF_BUSY       0x80000000U
-#define  SF_LOCK       0x00000010U
-#define  SF_CONT       0x00000008U
-#define  BYTECNT_MASK  0x00000006U
-#define  BYTECNT_SHIFT 1
-#define  BYTECNT(x)    ((x) << BYTECNT_SHIFT)
-#define  OP_WR         0x00000001U
-
-#define PL_PF_INT_CAUSE 0x3c0
-#define  PFSW  0x00000008U
-#define  PFSGE 0x00000004U
-#define  PFCIM 0x00000002U
-#define  PFMPS 0x00000001U
-
-#define PL_PF_INT_ENABLE 0x3c4
-#define PL_PF_CTL 0x3c8
-#define  SWINT 0x00000001U
-
-#define PL_WHOAMI 0x19400
-#define  SOURCEPF_MASK   0x00000700U
-#define  SOURCEPF_SHIFT  8
-#define  SOURCEPF(x)     ((x) << SOURCEPF_SHIFT)
-#define  SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
-#define  ISVF            0x00000080U
-#define  VFID_MASK       0x0000007fU
-#define  VFID_SHIFT      0
-#define  VFID(x)         ((x) << VFID_SHIFT)
-#define  VFID_GET(x)     (((x) & VFID_MASK) >> VFID_SHIFT)
-
-#define PL_INT_CAUSE 0x1940c
-#define  ULP_TX     0x08000000U
-#define  SGE        0x04000000U
-#define  HMA        0x02000000U
-#define  CPL_SWITCH 0x01000000U
-#define  ULP_RX     0x00800000U
-#define  PM_RX      0x00400000U
-#define  PM_TX      0x00200000U
-#define  MA         0x00100000U
-#define  TP         0x00080000U
-#define  LE         0x00040000U
-#define  EDC1       0x00020000U
-#define  EDC0       0x00010000U
-#define  MC         0x00008000U
-#define  PCIE       0x00004000U
-#define  PMU        0x00002000U
-#define  XGMAC_KR1  0x00001000U
-#define  XGMAC_KR0  0x00000800U
-#define  XGMAC1     0x00000400U
-#define  XGMAC0     0x00000200U
-#define  SMB        0x00000100U
-#define  SF         0x00000080U
-#define  PL         0x00000040U
-#define  NCSI       0x00000020U
-#define  MPS        0x00000010U
-#define  MI         0x00000008U
-#define  DBG        0x00000004U
-#define  I2CM       0x00000002U
-#define  CIM        0x00000001U
-
-#define MC1 0x31
-#define PL_INT_ENABLE 0x19410
-#define PL_INT_MAP0 0x19414
-#define PL_RST 0x19428
-#define  PIORST     0x00000002U
-#define  PIORSTMODE 0x00000001U
-
-#define PL_PL_INT_CAUSE 0x19430
-#define  FATALPERR 0x00000010U
-#define  PERRVFID  0x00000001U
-
-#define PL_REV 0x1943c
-
-#define S_REV    0
-#define M_REV    0xfU
-#define V_REV(x) ((x) << S_REV)
-#define G_REV(x) (((x) >> S_REV) & M_REV)
-
-#define LE_DB_CONFIG 0x19c04
-#define  HASHEN 0x00100000U
-
-#define LE_DB_SERVER_INDEX 0x19c18
-#define LE_DB_ACT_CNT_IPV4 0x19c20
-#define LE_DB_ACT_CNT_IPV6 0x19c24
-
-#define LE_DB_INT_CAUSE 0x19c3c
-#define  REQQPARERR 0x00010000U
-#define  UNKNOWNCMD 0x00008000U
-#define  PARITYERR  0x00000040U
-#define  LIPMISS    0x00000020U
-#define  LIP0       0x00000010U
-
-#define LE_DB_TID_HASHBASE 0x19df8
-
-#define NCSI_INT_CAUSE 0x1a0d8
-#define  CIM_DM_PRTY_ERR 0x00000100U
-#define  MPS_DM_PRTY_ERR 0x00000080U
-#define  TXFIFO_PRTY_ERR 0x00000002U
-#define  RXFIFO_PRTY_ERR 0x00000001U
-
-#define XGMAC_PORT_CFG2 0x1018
-#define  PATEN   0x00040000U
-#define  MAGICEN 0x00020000U
 
-#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
-#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+#define MPS_TRC_CFG_A 0x9800
+
+#define TRCFIFOEMPTY_S    4
+#define TRCFIFOEMPTY_V(x) ((x) << TRCFIFOEMPTY_S)
+#define TRCFIFOEMPTY_F    TRCFIFOEMPTY_V(1U)
+
+#define TRCIGNOREDROPINPUT_S    3
+#define TRCIGNOREDROPINPUT_V(x) ((x) << TRCIGNOREDROPINPUT_S)
+#define TRCIGNOREDROPINPUT_F    TRCIGNOREDROPINPUT_V(1U)
+
+#define TRCKEEPDUPLICATES_S    2
+#define TRCKEEPDUPLICATES_V(x) ((x) << TRCKEEPDUPLICATES_S)
+#define TRCKEEPDUPLICATES_F    TRCKEEPDUPLICATES_V(1U)
+
+#define TRCEN_S    1
+#define TRCEN_V(x) ((x) << TRCEN_S)
+#define TRCEN_F    TRCEN_V(1U)
+
+#define TRCMULTIFILTER_S    0
+#define TRCMULTIFILTER_V(x) ((x) << TRCMULTIFILTER_S)
+#define TRCMULTIFILTER_F    TRCMULTIFILTER_V(1U)
+
+#define MPS_TRC_RSS_CONTROL_A          0x9808
+#define MPS_T5_TRC_RSS_CONTROL_A       0xa00c
+
+#define RSSCONTROL_S    16
+#define RSSCONTROL_V(x) ((x) << RSSCONTROL_S)
+
+#define QUEUENUMBER_S    0
+#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
+
+#define TP_RSS_CONFIG_A 0x7df0
+
+#define TNL4TUPENIPV6_S    31
+#define TNL4TUPENIPV6_V(x) ((x) << TNL4TUPENIPV6_S)
+#define TNL4TUPENIPV6_F    TNL4TUPENIPV6_V(1U)
+
+#define TNL2TUPENIPV6_S    30
+#define TNL2TUPENIPV6_V(x) ((x) << TNL2TUPENIPV6_S)
+#define TNL2TUPENIPV6_F    TNL2TUPENIPV6_V(1U)
+
+#define TNL4TUPENIPV4_S    29
+#define TNL4TUPENIPV4_V(x) ((x) << TNL4TUPENIPV4_S)
+#define TNL4TUPENIPV4_F    TNL4TUPENIPV4_V(1U)
+
+#define TNL2TUPENIPV4_S    28
+#define TNL2TUPENIPV4_V(x) ((x) << TNL2TUPENIPV4_S)
+#define TNL2TUPENIPV4_F    TNL2TUPENIPV4_V(1U)
+
+#define TNLTCPSEL_S    27
+#define TNLTCPSEL_V(x) ((x) << TNLTCPSEL_S)
+#define TNLTCPSEL_F    TNLTCPSEL_V(1U)
+
+#define TNLIP6SEL_S    26
+#define TNLIP6SEL_V(x) ((x) << TNLIP6SEL_S)
+#define TNLIP6SEL_F    TNLIP6SEL_V(1U)
+
+#define TNLVRTSEL_S    25
+#define TNLVRTSEL_V(x) ((x) << TNLVRTSEL_S)
+#define TNLVRTSEL_F    TNLVRTSEL_V(1U)
+
+#define TNLMAPEN_S    24
+#define TNLMAPEN_V(x) ((x) << TNLMAPEN_S)
+#define TNLMAPEN_F    TNLMAPEN_V(1U)
+
+#define OFDHASHSAVE_S    19
+#define OFDHASHSAVE_V(x) ((x) << OFDHASHSAVE_S)
+#define OFDHASHSAVE_F    OFDHASHSAVE_V(1U)
+
+#define OFDVRTSEL_S    18
+#define OFDVRTSEL_V(x) ((x) << OFDVRTSEL_S)
+#define OFDVRTSEL_F    OFDVRTSEL_V(1U)
+
+#define OFDMAPEN_S    17
+#define OFDMAPEN_V(x) ((x) << OFDMAPEN_S)
+#define OFDMAPEN_F    OFDMAPEN_V(1U)
+
+#define OFDLKPEN_S    16
+#define OFDLKPEN_V(x) ((x) << OFDLKPEN_S)
+#define OFDLKPEN_F    OFDLKPEN_V(1U)
+
+#define SYN4TUPENIPV6_S    15
+#define SYN4TUPENIPV6_V(x) ((x) << SYN4TUPENIPV6_S)
+#define SYN4TUPENIPV6_F    SYN4TUPENIPV6_V(1U)
+
+#define SYN2TUPENIPV6_S    14
+#define SYN2TUPENIPV6_V(x) ((x) << SYN2TUPENIPV6_S)
+#define SYN2TUPENIPV6_F    SYN2TUPENIPV6_V(1U)
+
+#define SYN4TUPENIPV4_S    13
+#define SYN4TUPENIPV4_V(x) ((x) << SYN4TUPENIPV4_S)
+#define SYN4TUPENIPV4_F    SYN4TUPENIPV4_V(1U)
+
+#define SYN2TUPENIPV4_S    12
+#define SYN2TUPENIPV4_V(x) ((x) << SYN2TUPENIPV4_S)
+#define SYN2TUPENIPV4_F    SYN2TUPENIPV4_V(1U)
+
+#define SYNIP6SEL_S    11
+#define SYNIP6SEL_V(x) ((x) << SYNIP6SEL_S)
+#define SYNIP6SEL_F    SYNIP6SEL_V(1U)
+
+#define SYNVRTSEL_S    10
+#define SYNVRTSEL_V(x) ((x) << SYNVRTSEL_S)
+#define SYNVRTSEL_F    SYNVRTSEL_V(1U)
+
+#define SYNMAPEN_S    9
+#define SYNMAPEN_V(x) ((x) << SYNMAPEN_S)
+#define SYNMAPEN_F    SYNMAPEN_V(1U)
+
+#define SYNLKPEN_S    8
+#define SYNLKPEN_V(x) ((x) << SYNLKPEN_S)
+#define SYNLKPEN_F    SYNLKPEN_V(1U)
+
+#define CHANNELENABLE_S    7
+#define CHANNELENABLE_V(x) ((x) << CHANNELENABLE_S)
+#define CHANNELENABLE_F    CHANNELENABLE_V(1U)
+
+#define PORTENABLE_S    6
+#define PORTENABLE_V(x) ((x) << PORTENABLE_S)
+#define PORTENABLE_F    PORTENABLE_V(1U)
+
+#define TNLALLLOOKUP_S    5
+#define TNLALLLOOKUP_V(x) ((x) << TNLALLLOOKUP_S)
+#define TNLALLLOOKUP_F    TNLALLLOOKUP_V(1U)
+
+#define VIRTENABLE_S    4
+#define VIRTENABLE_V(x) ((x) << VIRTENABLE_S)
+#define VIRTENABLE_F    VIRTENABLE_V(1U)
+
+#define CONGESTIONENABLE_S    3
+#define CONGESTIONENABLE_V(x) ((x) << CONGESTIONENABLE_S)
+#define CONGESTIONENABLE_F    CONGESTIONENABLE_V(1U)
+
+#define HASHTOEPLITZ_S    2
+#define HASHTOEPLITZ_V(x) ((x) << HASHTOEPLITZ_S)
+#define HASHTOEPLITZ_F    HASHTOEPLITZ_V(1U)
+
+#define UDPENABLE_S    1
+#define UDPENABLE_V(x) ((x) << UDPENABLE_S)
+#define UDPENABLE_F    UDPENABLE_V(1U)
+
+#define DISABLE_S    0
+#define DISABLE_V(x) ((x) << DISABLE_S)
+#define DISABLE_F    DISABLE_V(1U)
+
+#define TP_RSS_CONFIG_TNL_A 0x7df4
+
+#define MASKSIZE_S    28
+#define MASKSIZE_M    0xfU
+#define MASKSIZE_V(x) ((x) << MASKSIZE_S)
+#define MASKSIZE_G(x) (((x) >> MASKSIZE_S) & MASKSIZE_M)
+
+#define MASKFILTER_S    16
+#define MASKFILTER_M    0x7ffU
+#define MASKFILTER_V(x) ((x) << MASKFILTER_S)
+#define MASKFILTER_G(x) (((x) >> MASKFILTER_S) & MASKFILTER_M)
+
+#define USEWIRECH_S    0
+#define USEWIRECH_V(x) ((x) << USEWIRECH_S)
+#define USEWIRECH_F    USEWIRECH_V(1U)
+
+#define HASHALL_S    2
+#define HASHALL_V(x) ((x) << HASHALL_S)
+#define HASHALL_F    HASHALL_V(1U)
+
+#define HASHETH_S    1
+#define HASHETH_V(x) ((x) << HASHETH_S)
+#define HASHETH_F    HASHETH_V(1U)
+
+#define TP_RSS_CONFIG_OFD_A 0x7df8
+
+#define RRCPLMAPEN_S    20
+#define RRCPLMAPEN_V(x) ((x) << RRCPLMAPEN_S)
+#define RRCPLMAPEN_F    RRCPLMAPEN_V(1U)
+
+#define RRCPLQUEWIDTH_S    16
+#define RRCPLQUEWIDTH_M    0xfU
+#define RRCPLQUEWIDTH_V(x) ((x) << RRCPLQUEWIDTH_S)
+#define RRCPLQUEWIDTH_G(x) (((x) >> RRCPLQUEWIDTH_S) & RRCPLQUEWIDTH_M)
+
+#define TP_RSS_CONFIG_SYN_A 0x7dfc
+#define TP_RSS_CONFIG_VRT_A 0x7e00
+
+#define VFRDRG_S    25
+#define VFRDRG_V(x) ((x) << VFRDRG_S)
+#define VFRDRG_F    VFRDRG_V(1U)
+
+#define VFRDEN_S    24
+#define VFRDEN_V(x) ((x) << VFRDEN_S)
+#define VFRDEN_F    VFRDEN_V(1U)
+
+#define VFPERREN_S    23
+#define VFPERREN_V(x) ((x) << VFPERREN_S)
+#define VFPERREN_F    VFPERREN_V(1U)
+
+#define KEYPERREN_S    22
+#define KEYPERREN_V(x) ((x) << KEYPERREN_S)
+#define KEYPERREN_F    KEYPERREN_V(1U)
+
+#define DISABLEVLAN_S    21
+#define DISABLEVLAN_V(x) ((x) << DISABLEVLAN_S)
+#define DISABLEVLAN_F    DISABLEVLAN_V(1U)
+
+#define ENABLEUP0_S    20
+#define ENABLEUP0_V(x) ((x) << ENABLEUP0_S)
+#define ENABLEUP0_F    ENABLEUP0_V(1U)
+
+#define HASHDELAY_S    16
+#define HASHDELAY_M    0xfU
+#define HASHDELAY_V(x) ((x) << HASHDELAY_S)
+#define HASHDELAY_G(x) (((x) >> HASHDELAY_S) & HASHDELAY_M)
+
+#define VFWRADDR_S    8
+#define VFWRADDR_M    0x7fU
+#define VFWRADDR_V(x) ((x) << VFWRADDR_S)
+#define VFWRADDR_G(x) (((x) >> VFWRADDR_S) & VFWRADDR_M)
+
+#define KEYMODE_S    6
+#define KEYMODE_M    0x3U
+#define KEYMODE_V(x) ((x) << KEYMODE_S)
+#define KEYMODE_G(x) (((x) >> KEYMODE_S) & KEYMODE_M)
+
+#define VFWREN_S    5
+#define VFWREN_V(x) ((x) << VFWREN_S)
+#define VFWREN_F    VFWREN_V(1U)
+
+#define KEYWREN_S    4
+#define KEYWREN_V(x) ((x) << KEYWREN_S)
+#define KEYWREN_F    KEYWREN_V(1U)
+
+#define KEYWRADDR_S    0
+#define KEYWRADDR_M    0xfU
+#define KEYWRADDR_V(x) ((x) << KEYWRADDR_S)
+#define KEYWRADDR_G(x) (((x) >> KEYWRADDR_S) & KEYWRADDR_M)
+
+#define KEYWRADDRX_S    30
+#define KEYWRADDRX_M    0x3U
+#define KEYWRADDRX_V(x) ((x) << KEYWRADDRX_S)
+#define KEYWRADDRX_G(x) (((x) >> KEYWRADDRX_S) & KEYWRADDRX_M)
+
+#define KEYEXTEND_S    26
+#define KEYEXTEND_V(x) ((x) << KEYEXTEND_S)
+#define KEYEXTEND_F    KEYEXTEND_V(1U)
+
+#define LKPIDXSIZE_S    24
+#define LKPIDXSIZE_M    0x3U
+#define LKPIDXSIZE_V(x) ((x) << LKPIDXSIZE_S)
+#define LKPIDXSIZE_G(x) (((x) >> LKPIDXSIZE_S) & LKPIDXSIZE_M)
+
+#define TP_RSS_VFL_CONFIG_A 0x3a
+#define TP_RSS_VFH_CONFIG_A 0x3b
+
+#define ENABLEUDPHASH_S    31
+#define ENABLEUDPHASH_V(x) ((x) << ENABLEUDPHASH_S)
+#define ENABLEUDPHASH_F    ENABLEUDPHASH_V(1U)
+
+#define VFUPEN_S    30
+#define VFUPEN_V(x) ((x) << VFUPEN_S)
+#define VFUPEN_F    VFUPEN_V(1U)
+
+#define VFVLNEX_S    28
+#define VFVLNEX_V(x) ((x) << VFVLNEX_S)
+#define VFVLNEX_F    VFVLNEX_V(1U)
+
+#define VFPRTEN_S    27
+#define VFPRTEN_V(x) ((x) << VFPRTEN_S)
+#define VFPRTEN_F    VFPRTEN_V(1U)
+
+#define VFCHNEN_S    26
+#define VFCHNEN_V(x) ((x) << VFCHNEN_S)
+#define VFCHNEN_F    VFCHNEN_V(1U)
+
+#define DEFAULTQUEUE_S    16
+#define DEFAULTQUEUE_M    0x3ffU
+#define DEFAULTQUEUE_G(x) (((x) >> DEFAULTQUEUE_S) & DEFAULTQUEUE_M)
+
+#define VFIP6TWOTUPEN_S    6
+#define VFIP6TWOTUPEN_V(x) ((x) << VFIP6TWOTUPEN_S)
+#define VFIP6TWOTUPEN_F    VFIP6TWOTUPEN_V(1U)
+
+#define VFIP4FOURTUPEN_S    5
+#define VFIP4FOURTUPEN_V(x) ((x) << VFIP4FOURTUPEN_S)
+#define VFIP4FOURTUPEN_F    VFIP4FOURTUPEN_V(1U)
+
+#define VFIP4TWOTUPEN_S    4
+#define VFIP4TWOTUPEN_V(x) ((x) << VFIP4TWOTUPEN_S)
+#define VFIP4TWOTUPEN_F    VFIP4TWOTUPEN_V(1U)
+
+#define KEYINDEX_S    0
+#define KEYINDEX_M    0xfU
+#define KEYINDEX_G(x) (((x) >> KEYINDEX_S) & KEYINDEX_M)
+
+#define MAPENABLE_S    31
+#define MAPENABLE_V(x) ((x) << MAPENABLE_S)
+#define MAPENABLE_F    MAPENABLE_V(1U)
+
+#define CHNENABLE_S    30
+#define CHNENABLE_V(x) ((x) << CHNENABLE_S)
+#define CHNENABLE_F    CHNENABLE_V(1U)
+
+#define PRTENABLE_S    29
+#define PRTENABLE_V(x) ((x) << PRTENABLE_S)
+#define PRTENABLE_F    PRTENABLE_V(1U)
+
+#define UDPFOURTUPEN_S    28
+#define UDPFOURTUPEN_V(x) ((x) << UDPFOURTUPEN_S)
+#define UDPFOURTUPEN_F    UDPFOURTUPEN_V(1U)
+
+#define IP6FOURTUPEN_S    27
+#define IP6FOURTUPEN_V(x) ((x) << IP6FOURTUPEN_S)
+#define IP6FOURTUPEN_F    IP6FOURTUPEN_V(1U)
 
-#define XGMAC_PORT_EPIO_DATA0 0x10c0
-#define XGMAC_PORT_EPIO_DATA1 0x10c4
-#define XGMAC_PORT_EPIO_DATA2 0x10c8
-#define XGMAC_PORT_EPIO_DATA3 0x10cc
-#define XGMAC_PORT_EPIO_OP 0x10d0
-#define  EPIOWR         0x00000100U
-#define  ADDRESS_MASK   0x000000ffU
-#define  ADDRESS_SHIFT  0
-#define  ADDRESS(x)     ((x) << ADDRESS_SHIFT)
+#define IP6TWOTUPEN_S    26
+#define IP6TWOTUPEN_V(x) ((x) << IP6TWOTUPEN_S)
+#define IP6TWOTUPEN_F    IP6TWOTUPEN_V(1U)
 
-#define MAC_PORT_INT_CAUSE 0x8dc
-#define XGMAC_PORT_INT_CAUSE 0x10dc
+#define IP4FOURTUPEN_S    25
+#define IP4FOURTUPEN_V(x) ((x) << IP4FOURTUPEN_S)
+#define IP4FOURTUPEN_F    IP4FOURTUPEN_V(1U)
 
-#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
+#define IP4TWOTUPEN_S    24
+#define IP4TWOTUPEN_V(x) ((x) << IP4TWOTUPEN_S)
+#define IP4TWOTUPEN_F    IP4TWOTUPEN_V(1U)
 
-#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
+#define IVFWIDTH_S    20
+#define IVFWIDTH_M    0xfU
+#define IVFWIDTH_V(x) ((x) << IVFWIDTH_S)
+#define IVFWIDTH_G(x) (((x) >> IVFWIDTH_S) & IVFWIDTH_M)
 
-#define S_TX_MOD_QUEUE_REQ_MAP    0
-#define M_TX_MOD_QUEUE_REQ_MAP    0xffffU
-#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+#define CH1DEFAULTQUEUE_S    10
+#define CH1DEFAULTQUEUE_M    0x3ffU
+#define CH1DEFAULTQUEUE_V(x) ((x) << CH1DEFAULTQUEUE_S)
+#define CH1DEFAULTQUEUE_G(x) (((x) >> CH1DEFAULTQUEUE_S) & CH1DEFAULTQUEUE_M)
 
-#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
+#define CH0DEFAULTQUEUE_S    0
+#define CH0DEFAULTQUEUE_M    0x3ffU
+#define CH0DEFAULTQUEUE_V(x) ((x) << CH0DEFAULTQUEUE_S)
+#define CH0DEFAULTQUEUE_G(x) (((x) >> CH0DEFAULTQUEUE_S) & CH0DEFAULTQUEUE_M)
 
-#define S_TX_MODQ_WEIGHT3    24
-#define M_TX_MODQ_WEIGHT3    0xffU
-#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
+#define VFLKPIDX_S    8
+#define VFLKPIDX_M    0xffU
+#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
 
-#define S_TX_MODQ_WEIGHT2    16
-#define M_TX_MODQ_WEIGHT2    0xffU
-#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
+#define TP_RSS_CONFIG_CNG_A 0x7e04
+#define TP_RSS_SECRET_KEY0_A 0x40
+#define TP_RSS_PF0_CONFIG_A 0x30
+#define TP_RSS_PF_MAP_A 0x38
+#define TP_RSS_PF_MSK_A 0x39
 
-#define S_TX_MODQ_WEIGHT1    8
-#define M_TX_MODQ_WEIGHT1    0xffU
-#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
+#define PF1LKPIDX_S    3
 
-#define S_TX_MODQ_WEIGHT0    0
-#define M_TX_MODQ_WEIGHT0    0xffU
-#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
+#define PF0LKPIDX_M    0x7U
 
-#define A_TP_TX_SCHED_HDR 0x23
+#define PF1MSKSIZE_S    4
+#define PF1MSKSIZE_M    0xfU
 
-#define A_TP_TX_SCHED_FIFO 0x24
+#define CHNCOUNT3_S    31
+#define CHNCOUNT3_V(x) ((x) << CHNCOUNT3_S)
+#define CHNCOUNT3_F    CHNCOUNT3_V(1U)
 
-#define A_TP_TX_SCHED_PCMD 0x25
+#define CHNCOUNT2_S    30
+#define CHNCOUNT2_V(x) ((x) << CHNCOUNT2_S)
+#define CHNCOUNT2_F    CHNCOUNT2_V(1U)
 
-#define S_VNIC    11
-#define V_VNIC(x) ((x) << S_VNIC)
-#define F_VNIC    V_VNIC(1U)
+#define CHNCOUNT1_S    29
+#define CHNCOUNT1_V(x) ((x) << CHNCOUNT1_S)
+#define CHNCOUNT1_F    CHNCOUNT1_V(1U)
 
-#define S_FRAGMENTATION    9
-#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
-#define F_FRAGMENTATION    V_FRAGMENTATION(1U)
+#define CHNCOUNT0_S    28
+#define CHNCOUNT0_V(x) ((x) << CHNCOUNT0_S)
+#define CHNCOUNT0_F    CHNCOUNT0_V(1U)
 
-#define S_MPSHITTYPE    8
-#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
-#define F_MPSHITTYPE    V_MPSHITTYPE(1U)
+#define CHNUNDFLOW3_S    27
+#define CHNUNDFLOW3_V(x) ((x) << CHNUNDFLOW3_S)
+#define CHNUNDFLOW3_F    CHNUNDFLOW3_V(1U)
 
-#define S_MACMATCH    7
-#define V_MACMATCH(x) ((x) << S_MACMATCH)
-#define F_MACMATCH    V_MACMATCH(1U)
+#define CHNUNDFLOW2_S    26
+#define CHNUNDFLOW2_V(x) ((x) << CHNUNDFLOW2_S)
+#define CHNUNDFLOW2_F    CHNUNDFLOW2_V(1U)
 
-#define S_ETHERTYPE    6
-#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
-#define F_ETHERTYPE    V_ETHERTYPE(1U)
+#define CHNUNDFLOW1_S    25
+#define CHNUNDFLOW1_V(x) ((x) << CHNUNDFLOW1_S)
+#define CHNUNDFLOW1_F    CHNUNDFLOW1_V(1U)
 
-#define S_PROTOCOL    5
-#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
-#define F_PROTOCOL    V_PROTOCOL(1U)
+#define CHNUNDFLOW0_S    24
+#define CHNUNDFLOW0_V(x) ((x) << CHNUNDFLOW0_S)
+#define CHNUNDFLOW0_F    CHNUNDFLOW0_V(1U)
 
-#define S_TOS    4
-#define V_TOS(x) ((x) << S_TOS)
-#define F_TOS    V_TOS(1U)
+#define RSTCHN3_S    19
+#define RSTCHN3_V(x) ((x) << RSTCHN3_S)
+#define RSTCHN3_F    RSTCHN3_V(1U)
 
-#define S_VLAN    3
-#define V_VLAN(x) ((x) << S_VLAN)
-#define F_VLAN    V_VLAN(1U)
+#define RSTCHN2_S    18
+#define RSTCHN2_V(x) ((x) << RSTCHN2_S)
+#define RSTCHN2_F    RSTCHN2_V(1U)
 
-#define S_VNIC_ID    2
-#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
-#define F_VNIC_ID    V_VNIC_ID(1U)
+#define RSTCHN1_S    17
+#define RSTCHN1_V(x) ((x) << RSTCHN1_S)
+#define RSTCHN1_F    RSTCHN1_V(1U)
 
-#define S_PORT    1
-#define V_PORT(x) ((x) << S_PORT)
-#define F_PORT    V_PORT(1U)
+#define RSTCHN0_S    16
+#define RSTCHN0_V(x) ((x) << RSTCHN0_S)
+#define RSTCHN0_F    RSTCHN0_V(1U)
 
-#define S_FCOE    0
-#define V_FCOE(x) ((x) << S_FCOE)
-#define F_FCOE    V_FCOE(1U)
+#define UPDVLD_S    15
+#define UPDVLD_V(x) ((x) << UPDVLD_S)
+#define UPDVLD_F    UPDVLD_V(1U)
+
+#define XOFF_S    14
+#define XOFF_V(x) ((x) << XOFF_S)
+#define XOFF_F    XOFF_V(1U)
+
+#define UPDCHN3_S    13
+#define UPDCHN3_V(x) ((x) << UPDCHN3_S)
+#define UPDCHN3_F    UPDCHN3_V(1U)
+
+#define UPDCHN2_S    12
+#define UPDCHN2_V(x) ((x) << UPDCHN2_S)
+#define UPDCHN2_F    UPDCHN2_V(1U)
+
+#define UPDCHN1_S    11
+#define UPDCHN1_V(x) ((x) << UPDCHN1_S)
+#define UPDCHN1_F    UPDCHN1_V(1U)
+
+#define UPDCHN0_S    10
+#define UPDCHN0_V(x) ((x) << UPDCHN0_S)
+#define UPDCHN0_F    UPDCHN0_V(1U)
+
+#define QUEUE_S    0
+#define QUEUE_M    0x3ffU
+#define QUEUE_V(x) ((x) << QUEUE_S)
+#define QUEUE_G(x) (((x) >> QUEUE_S) & QUEUE_M)
+
+#define MPS_TRC_INT_CAUSE_A    0x985c
+
+#define MISCPERR_S    8
+#define MISCPERR_V(x) ((x) << MISCPERR_S)
+#define MISCPERR_F    MISCPERR_V(1U)
+
+#define PKTFIFO_S    4
+#define PKTFIFO_M    0xfU
+#define PKTFIFO_V(x) ((x) << PKTFIFO_S)
+
+#define FILTMEM_S    0
+#define FILTMEM_M    0xfU
+#define FILTMEM_V(x) ((x) << FILTMEM_S)
+
+#define MPS_CLS_INT_CAUSE_A 0xd028
+
+#define HASHSRAM_S    2
+#define HASHSRAM_V(x) ((x) << HASHSRAM_S)
+#define HASHSRAM_F    HASHSRAM_V(1U)
+
+#define MATCHTCAM_S    1
+#define MATCHTCAM_V(x) ((x) << MATCHTCAM_S)
+#define MATCHTCAM_F    MATCHTCAM_V(1U)
+
+#define MATCHSRAM_S    0
+#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
+#define MATCHSRAM_F    MATCHSRAM_V(1U)
+
+#define MPS_RX_PERR_INT_CAUSE_A 0x11074
+
+#define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_X_L_A 0xf008
+
+#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
+
+#define MPS_CLS_TCAM_X_L(idx) (MPS_CLS_TCAM_X_L_A + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
+
+#define MPS_CLS_SRAM_L_A 0xe000
+#define MPS_CLS_SRAM_H_A 0xe004
+
+#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+
+#define MPS_CLS_SRAM_H(idx) (MPS_CLS_SRAM_H_A + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_H_INSTANCES 336
+
+#define MULTILISTEN0_S    25
+
+#define REPLICATE_S    11
+#define REPLICATE_V(x) ((x) << REPLICATE_S)
+#define REPLICATE_F    REPLICATE_V(1U)
+
+#define PF_S    8
+#define PF_M    0x7U
+#define PF_G(x) (((x) >> PF_S) & PF_M)
+
+#define VF_VALID_S    7
+#define VF_VALID_V(x) ((x) << VF_VALID_S)
+#define VF_VALID_F    VF_VALID_V(1U)
+
+#define VF_S    0
+#define VF_M    0x7fU
+#define VF_G(x) (((x) >> VF_S) & VF_M)
+
+#define SRAM_PRIO3_S    22
+#define SRAM_PRIO3_M    0x7U
+#define SRAM_PRIO3_G(x) (((x) >> SRAM_PRIO3_S) & SRAM_PRIO3_M)
+
+#define SRAM_PRIO2_S    19
+#define SRAM_PRIO2_M    0x7U
+#define SRAM_PRIO2_G(x) (((x) >> SRAM_PRIO2_S) & SRAM_PRIO2_M)
+
+#define SRAM_PRIO1_S    16
+#define SRAM_PRIO1_M    0x7U
+#define SRAM_PRIO1_G(x) (((x) >> SRAM_PRIO1_S) & SRAM_PRIO1_M)
+
+#define SRAM_PRIO0_S    13
+#define SRAM_PRIO0_M    0x7U
+#define SRAM_PRIO0_G(x) (((x) >> SRAM_PRIO0_S) & SRAM_PRIO0_M)
+
+#define SRAM_VLD_S    12
+#define SRAM_VLD_V(x) ((x) << SRAM_VLD_S)
+#define SRAM_VLD_F    SRAM_VLD_V(1U)
+
+#define PORTMAP_S    0
+#define PORTMAP_M    0xfU
+#define PORTMAP_G(x) (((x) >> PORTMAP_S) & PORTMAP_M)
+
+#define CPL_INTR_CAUSE_A 0x19054
+
+#define CIM_OP_MAP_PERR_S    5
+#define CIM_OP_MAP_PERR_V(x) ((x) << CIM_OP_MAP_PERR_S)
+#define CIM_OP_MAP_PERR_F    CIM_OP_MAP_PERR_V(1U)
+
+#define CIM_OVFL_ERROR_S    4
+#define CIM_OVFL_ERROR_V(x) ((x) << CIM_OVFL_ERROR_S)
+#define CIM_OVFL_ERROR_F    CIM_OVFL_ERROR_V(1U)
+
+#define TP_FRAMING_ERROR_S    3
+#define TP_FRAMING_ERROR_V(x) ((x) << TP_FRAMING_ERROR_S)
+#define TP_FRAMING_ERROR_F    TP_FRAMING_ERROR_V(1U)
+
+#define SGE_FRAMING_ERROR_S    2
+#define SGE_FRAMING_ERROR_V(x) ((x) << SGE_FRAMING_ERROR_S)
+#define SGE_FRAMING_ERROR_F    SGE_FRAMING_ERROR_V(1U)
+
+#define CIM_FRAMING_ERROR_S    1
+#define CIM_FRAMING_ERROR_V(x) ((x) << CIM_FRAMING_ERROR_S)
+#define CIM_FRAMING_ERROR_F    CIM_FRAMING_ERROR_V(1U)
+
+#define ZERO_SWITCH_ERROR_S    0
+#define ZERO_SWITCH_ERROR_V(x) ((x) << ZERO_SWITCH_ERROR_S)
+#define ZERO_SWITCH_ERROR_F    ZERO_SWITCH_ERROR_V(1U)
+
+#define SMB_INT_CAUSE_A 0x19090
+
+#define MSTTXFIFOPARINT_S    21
+#define MSTTXFIFOPARINT_V(x) ((x) << MSTTXFIFOPARINT_S)
+#define MSTTXFIFOPARINT_F    MSTTXFIFOPARINT_V(1U)
+
+#define MSTRXFIFOPARINT_S    20
+#define MSTRXFIFOPARINT_V(x) ((x) << MSTRXFIFOPARINT_S)
+#define MSTRXFIFOPARINT_F    MSTRXFIFOPARINT_V(1U)
+
+#define SLVFIFOPARINT_S    19
+#define SLVFIFOPARINT_V(x) ((x) << SLVFIFOPARINT_S)
+#define SLVFIFOPARINT_F    SLVFIFOPARINT_V(1U)
+
+#define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_TAGMASK_A 0x19164
+#define ULP_RX_ISCSI_PSZ_A 0x19168
+#define ULP_RX_LA_CTL_A 0x1923c
+#define ULP_RX_LA_RDPTR_A 0x19240
+#define ULP_RX_LA_RDDATA_A 0x19244
+#define ULP_RX_LA_WRPTR_A 0x19248
+
+#define HPZ3_S    24
+#define HPZ3_V(x) ((x) << HPZ3_S)
+
+#define HPZ2_S    16
+#define HPZ2_V(x) ((x) << HPZ2_S)
+
+#define HPZ1_S    8
+#define HPZ1_V(x) ((x) << HPZ1_S)
+
+#define HPZ0_S    0
+#define HPZ0_V(x) ((x) << HPZ0_S)
+
+#define ULP_RX_TDDP_PSZ_A 0x19178
+
+/* registers for module SF */
+#define SF_DATA_A 0x193f8
+#define SF_OP_A 0x193fc
+
+#define SF_BUSY_S    31
+#define SF_BUSY_V(x) ((x) << SF_BUSY_S)
+#define SF_BUSY_F    SF_BUSY_V(1U)
+
+#define SF_LOCK_S    4
+#define SF_LOCK_V(x) ((x) << SF_LOCK_S)
+#define SF_LOCK_F    SF_LOCK_V(1U)
+
+#define SF_CONT_S    3
+#define SF_CONT_V(x) ((x) << SF_CONT_S)
+#define SF_CONT_F    SF_CONT_V(1U)
+
+#define BYTECNT_S    1
+#define BYTECNT_V(x) ((x) << BYTECNT_S)
+
+#define OP_S    0
+#define OP_V(x) ((x) << OP_S)
+#define OP_F    OP_V(1U)
+
+#define PL_PF_INT_CAUSE_A 0x3c0
+
+#define PFSW_S    3
+#define PFSW_V(x) ((x) << PFSW_S)
+#define PFSW_F    PFSW_V(1U)
+
+#define PFCIM_S    1
+#define PFCIM_V(x) ((x) << PFCIM_S)
+#define PFCIM_F    PFCIM_V(1U)
+
+#define PL_PF_INT_ENABLE_A 0x3c4
+#define PL_PF_CTL_A 0x3c8
+
+#define PL_WHOAMI_A 0x19400
+
+#define SOURCEPF_S    8
+#define SOURCEPF_M    0x7U
+#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
+
+#define PL_INT_CAUSE_A 0x1940c
+
+#define ULP_TX_S    27
+#define ULP_TX_V(x) ((x) << ULP_TX_S)
+#define ULP_TX_F    ULP_TX_V(1U)
+
+#define SGE_S    26
+#define SGE_V(x) ((x) << SGE_S)
+#define SGE_F    SGE_V(1U)
+
+#define CPL_SWITCH_S    24
+#define CPL_SWITCH_V(x) ((x) << CPL_SWITCH_S)
+#define CPL_SWITCH_F    CPL_SWITCH_V(1U)
+
+#define ULP_RX_S    23
+#define ULP_RX_V(x) ((x) << ULP_RX_S)
+#define ULP_RX_F    ULP_RX_V(1U)
+
+#define PM_RX_S    22
+#define PM_RX_V(x) ((x) << PM_RX_S)
+#define PM_RX_F    PM_RX_V(1U)
+
+#define PM_TX_S    21
+#define PM_TX_V(x) ((x) << PM_TX_S)
+#define PM_TX_F    PM_TX_V(1U)
+
+#define MA_S    20
+#define MA_V(x) ((x) << MA_S)
+#define MA_F    MA_V(1U)
+
+#define TP_S    19
+#define TP_V(x) ((x) << TP_S)
+#define TP_F    TP_V(1U)
+
+#define LE_S    18
+#define LE_V(x) ((x) << LE_S)
+#define LE_F    LE_V(1U)
+
+#define EDC1_S    17
+#define EDC1_V(x) ((x) << EDC1_S)
+#define EDC1_F    EDC1_V(1U)
+
+#define EDC0_S    16
+#define EDC0_V(x) ((x) << EDC0_S)
+#define EDC0_F    EDC0_V(1U)
+
+#define MC_S    15
+#define MC_V(x) ((x) << MC_S)
+#define MC_F    MC_V(1U)
+
+#define PCIE_S    14
+#define PCIE_V(x) ((x) << PCIE_S)
+#define PCIE_F    PCIE_V(1U)
+
+#define XGMAC_KR1_S    12
+#define XGMAC_KR1_V(x) ((x) << XGMAC_KR1_S)
+#define XGMAC_KR1_F    XGMAC_KR1_V(1U)
+
+#define XGMAC_KR0_S    11
+#define XGMAC_KR0_V(x) ((x) << XGMAC_KR0_S)
+#define XGMAC_KR0_F    XGMAC_KR0_V(1U)
+
+#define XGMAC1_S    10
+#define XGMAC1_V(x) ((x) << XGMAC1_S)
+#define XGMAC1_F    XGMAC1_V(1U)
+
+#define XGMAC0_S    9
+#define XGMAC0_V(x) ((x) << XGMAC0_S)
+#define XGMAC0_F    XGMAC0_V(1U)
+
+#define SMB_S    8
+#define SMB_V(x) ((x) << SMB_S)
+#define SMB_F    SMB_V(1U)
+
+#define SF_S    7
+#define SF_V(x) ((x) << SF_S)
+#define SF_F    SF_V(1U)
+
+#define PL_S    6
+#define PL_V(x) ((x) << PL_S)
+#define PL_F    PL_V(1U)
+
+#define NCSI_S    5
+#define NCSI_V(x) ((x) << NCSI_S)
+#define NCSI_F    NCSI_V(1U)
+
+#define MPS_S    4
+#define MPS_V(x) ((x) << MPS_S)
+#define MPS_F    MPS_V(1U)
+
+#define CIM_S    0
+#define CIM_V(x) ((x) << CIM_S)
+#define CIM_F    CIM_V(1U)
+
+#define MC1_S    31
+
+#define PL_INT_ENABLE_A 0x19410
+#define PL_INT_MAP0_A 0x19414
+#define PL_RST_A 0x19428
+
+#define PIORST_S    1
+#define PIORST_V(x) ((x) << PIORST_S)
+#define PIORST_F    PIORST_V(1U)
+
+#define PIORSTMODE_S    0
+#define PIORSTMODE_V(x) ((x) << PIORSTMODE_S)
+#define PIORSTMODE_F    PIORSTMODE_V(1U)
+
+#define PL_PL_INT_CAUSE_A 0x19430
+
+#define FATALPERR_S    4
+#define FATALPERR_V(x) ((x) << FATALPERR_S)
+#define FATALPERR_F    FATALPERR_V(1U)
+
+#define PERRVFID_S    0
+#define PERRVFID_V(x) ((x) << PERRVFID_S)
+#define PERRVFID_F    PERRVFID_V(1U)
+
+#define PL_REV_A 0x1943c
+
+#define REV_S    0
+#define REV_M    0xfU
+#define REV_V(x) ((x) << REV_S)
+#define REV_G(x) (((x) >> REV_S) & REV_M)
+
+#define LE_DB_INT_CAUSE_A 0x19c3c
+
+#define REQQPARERR_S    16
+#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
+#define REQQPARERR_F    REQQPARERR_V(1U)
+
+#define UNKNOWNCMD_S    15
+#define UNKNOWNCMD_V(x) ((x) << UNKNOWNCMD_S)
+#define UNKNOWNCMD_F    UNKNOWNCMD_V(1U)
+
+#define PARITYERR_S    6
+#define PARITYERR_V(x) ((x) << PARITYERR_S)
+#define PARITYERR_F    PARITYERR_V(1U)
+
+#define LIPMISS_S    5
+#define LIPMISS_V(x) ((x) << LIPMISS_S)
+#define LIPMISS_F    LIPMISS_V(1U)
+
+#define LIP0_S    4
+#define LIP0_V(x) ((x) << LIP0_S)
+#define LIP0_F    LIP0_V(1U)
+
+#define NCSI_INT_CAUSE_A 0x1a0d8
+
+#define CIM_DM_PRTY_ERR_S    8
+#define CIM_DM_PRTY_ERR_V(x) ((x) << CIM_DM_PRTY_ERR_S)
+#define CIM_DM_PRTY_ERR_F    CIM_DM_PRTY_ERR_V(1U)
+
+#define MPS_DM_PRTY_ERR_S    7
+#define MPS_DM_PRTY_ERR_V(x) ((x) << MPS_DM_PRTY_ERR_S)
+#define MPS_DM_PRTY_ERR_F    MPS_DM_PRTY_ERR_V(1U)
+
+#define TXFIFO_PRTY_ERR_S    1
+#define TXFIFO_PRTY_ERR_V(x) ((x) << TXFIFO_PRTY_ERR_S)
+#define TXFIFO_PRTY_ERR_F    TXFIFO_PRTY_ERR_V(1U)
+
+#define RXFIFO_PRTY_ERR_S    0
+#define RXFIFO_PRTY_ERR_V(x) ((x) << RXFIFO_PRTY_ERR_S)
+#define RXFIFO_PRTY_ERR_F    RXFIFO_PRTY_ERR_V(1U)
+
+#define XGMAC_PORT_CFG2_A 0x1018
+
+#define PATEN_S    18
+#define PATEN_V(x) ((x) << PATEN_S)
+#define PATEN_F    PATEN_V(1U)
+
+#define MAGICEN_S    17
+#define MAGICEN_V(x) ((x) << MAGICEN_S)
+#define MAGICEN_F    MAGICEN_V(1U)
+
+#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
+#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+
+#define XGMAC_PORT_EPIO_DATA0_A 0x10c0
+#define XGMAC_PORT_EPIO_DATA1_A 0x10c4
+#define XGMAC_PORT_EPIO_DATA2_A 0x10c8
+#define XGMAC_PORT_EPIO_DATA3_A 0x10cc
+#define XGMAC_PORT_EPIO_OP_A 0x10d0
+
+#define EPIOWR_S    8
+#define EPIOWR_V(x) ((x) << EPIOWR_S)
+#define EPIOWR_F    EPIOWR_V(1U)
+
+#define ADDRESS_S    0
+#define ADDRESS_V(x) ((x) << ADDRESS_S)
+
+#define MAC_PORT_INT_CAUSE_A 0x8dc
+#define XGMAC_PORT_INT_CAUSE_A 0x10dc
+
+#define TP_TX_MOD_QUEUE_REQ_MAP_A 0x7e28
+
+#define TP_TX_MOD_QUEUE_WEIGHT0_A 0x7e30
+#define TP_TX_MOD_CHANNEL_WEIGHT_A 0x7e34
+
+#define TX_MOD_QUEUE_REQ_MAP_S    0
+#define TX_MOD_QUEUE_REQ_MAP_V(x) ((x) << TX_MOD_QUEUE_REQ_MAP_S)
+
+#define TX_MODQ_WEIGHT3_S    24
+#define TX_MODQ_WEIGHT3_V(x) ((x) << TX_MODQ_WEIGHT3_S)
+
+#define TX_MODQ_WEIGHT2_S    16
+#define TX_MODQ_WEIGHT2_V(x) ((x) << TX_MODQ_WEIGHT2_S)
+
+#define TX_MODQ_WEIGHT1_S    8
+#define TX_MODQ_WEIGHT1_V(x) ((x) << TX_MODQ_WEIGHT1_S)
+
+#define TX_MODQ_WEIGHT0_S    0
+#define TX_MODQ_WEIGHT0_V(x) ((x) << TX_MODQ_WEIGHT0_S)
+
+#define TP_TX_SCHED_HDR_A 0x23
+#define TP_TX_SCHED_FIFO_A 0x24
+#define TP_TX_SCHED_PCMD_A 0x25
 
 #define NUM_MPS_CLS_SRAM_L_INSTANCES 336
 #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
 #define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
 #define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
 
-#define MC_P_BIST_CMD 0x41400
-#define MC_P_BIST_CMD_ADDR 0x41404
-#define MC_P_BIST_CMD_LEN 0x41408
-#define MC_P_BIST_DATA_PATTERN 0x4140c
-#define MC_P_BIST_STATUS_RDATA 0x41488
-#define EDC_T50_BASE_ADDR 0x50000
-#define EDC_H_BIST_CMD 0x50004
-#define EDC_H_BIST_CMD_ADDR 0x50008
-#define EDC_H_BIST_CMD_LEN 0x5000c
-#define EDC_H_BIST_DATA_PATTERN 0x50010
-#define EDC_H_BIST_STATUS_RDATA 0x50028
-
-#define EDC_T51_BASE_ADDR 0x50800
+#define MC_P_BIST_CMD_A                        0x41400
+#define MC_P_BIST_CMD_ADDR_A           0x41404
+#define MC_P_BIST_CMD_LEN_A            0x41408
+#define MC_P_BIST_DATA_PATTERN_A       0x4140c
+#define MC_P_BIST_STATUS_RDATA_A       0x41488
+
+#define EDC_T50_BASE_ADDR              0x50000
+
+#define EDC_H_BIST_CMD_A               0x50004
+#define EDC_H_BIST_CMD_ADDR_A          0x50008
+#define EDC_H_BIST_CMD_LEN_A           0x5000c
+#define EDC_H_BIST_DATA_PATTERN_A      0x50010
+#define EDC_H_BIST_STATUS_RDATA_A      0x50028
+
+#define EDC_T51_BASE_ADDR              0x50800
+
 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
 
-#define A_PL_VF_REV 0x4
-#define A_PL_VF_WHOAMI 0x0
-#define A_PL_VF_REVISION 0x8
+#define PL_VF_REV_A 0x4
+#define PL_VF_WHOAMI_A 0x0
+#define PL_VF_REVISION_A 0x8
 
-#define S_CHIPID    4
-#define M_CHIPID    0xfU
-#define V_CHIPID(x) ((x) << S_CHIPID)
-#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
+/* registers for module CIM */
+#define CIM_HOST_ACC_CTRL_A    0x7b50
+#define CIM_HOST_ACC_DATA_A    0x7b54
+#define UP_UP_DBG_LA_CFG_A     0x140
+#define UP_UP_DBG_LA_DATA_A    0x144
 
-/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
- * Compressed Filter Tuple for LE filters.  Each bit set in TP_VLAN_PRI_MAP
- * selects for a particular field being present.  These fields, when present
- * in the Compressed Filter Tuple, have the following widths in bits.
- */
-#define W_FT_FCOE                       1
-#define W_FT_PORT                       3
-#define W_FT_VNIC_ID                    17
-#define W_FT_VLAN                       17
-#define W_FT_TOS                        8
-#define W_FT_PROTOCOL                   8
-#define W_FT_ETHERTYPE                  16
-#define W_FT_MACMATCH                   9
-#define W_FT_MPSHITTYPE                 3
-#define W_FT_FRAGMENTATION              1
-
-/* Some of the Compressed Filter Tuple fields have internal structure.  These
- * bit shifts/masks describe those structures.  All shifts are relative to the
- * base position of the fields within the Compressed Filter Tuple
- */
-#define S_FT_VLAN_VLD                   16
-#define V_FT_VLAN_VLD(x)                ((x) << S_FT_VLAN_VLD)
-#define F_FT_VLAN_VLD                   V_FT_VLAN_VLD(1U)
+#define HOSTBUSY_S     17
+#define HOSTBUSY_V(x)  ((x) << HOSTBUSY_S)
+#define HOSTBUSY_F     HOSTBUSY_V(1U)
+
+#define HOSTWRITE_S    16
+#define HOSTWRITE_V(x) ((x) << HOSTWRITE_S)
+#define HOSTWRITE_F    HOSTWRITE_V(1U)
+
+#define CIM_IBQ_DBG_CFG_A 0x7b60
+
+#define IBQDBGADDR_S    16
+#define IBQDBGADDR_M    0xfffU
+#define IBQDBGADDR_V(x) ((x) << IBQDBGADDR_S)
+#define IBQDBGADDR_G(x) (((x) >> IBQDBGADDR_S) & IBQDBGADDR_M)
+
+#define IBQDBGBUSY_S    1
+#define IBQDBGBUSY_V(x) ((x) << IBQDBGBUSY_S)
+#define IBQDBGBUSY_F    IBQDBGBUSY_V(1U)
+
+#define IBQDBGEN_S    0
+#define IBQDBGEN_V(x) ((x) << IBQDBGEN_S)
+#define IBQDBGEN_F    IBQDBGEN_V(1U)
+
+#define CIM_OBQ_DBG_CFG_A 0x7b64
+
+#define OBQDBGADDR_S    16
+#define OBQDBGADDR_M    0xfffU
+#define OBQDBGADDR_V(x) ((x) << OBQDBGADDR_S)
+#define OBQDBGADDR_G(x) (((x) >> OBQDBGADDR_S) & OBQDBGADDR_M)
+
+#define OBQDBGBUSY_S    1
+#define OBQDBGBUSY_V(x) ((x) << OBQDBGBUSY_S)
+#define OBQDBGBUSY_F    OBQDBGBUSY_V(1U)
+
+#define OBQDBGEN_S    0
+#define OBQDBGEN_V(x) ((x) << OBQDBGEN_S)
+#define OBQDBGEN_F    OBQDBGEN_V(1U)
+
+#define CIM_IBQ_DBG_DATA_A 0x7b68
+#define CIM_OBQ_DBG_DATA_A 0x7b6c
+
+#define UPDBGLARDEN_S          1
+#define UPDBGLARDEN_V(x)       ((x) << UPDBGLARDEN_S)
+#define UPDBGLARDEN_F          UPDBGLARDEN_V(1U)
+
+#define UPDBGLAEN_S    0
+#define UPDBGLAEN_V(x) ((x) << UPDBGLAEN_S)
+#define UPDBGLAEN_F    UPDBGLAEN_V(1U)
+
+#define UPDBGLARDPTR_S         2
+#define UPDBGLARDPTR_M         0xfffU
+#define UPDBGLARDPTR_V(x)      ((x) << UPDBGLARDPTR_S)
+
+#define UPDBGLAWRPTR_S    16
+#define UPDBGLAWRPTR_M    0xfffU
+#define UPDBGLAWRPTR_G(x) (((x) >> UPDBGLAWRPTR_S) & UPDBGLAWRPTR_M)
+
+#define UPDBGLACAPTPCONLY_S    30
+#define UPDBGLACAPTPCONLY_V(x) ((x) << UPDBGLACAPTPCONLY_S)
+#define UPDBGLACAPTPCONLY_F    UPDBGLACAPTPCONLY_V(1U)
+
+#define CIM_QUEUE_CONFIG_REF_A 0x7b48
+#define CIM_QUEUE_CONFIG_CTRL_A 0x7b4c
+
+#define CIMQSIZE_S    24
+#define CIMQSIZE_M    0x3fU
+#define CIMQSIZE_G(x) (((x) >> CIMQSIZE_S) & CIMQSIZE_M)
+
+#define CIMQBASE_S    16
+#define CIMQBASE_M    0x3fU
+#define CIMQBASE_G(x) (((x) >> CIMQBASE_S) & CIMQBASE_M)
+
+#define QUEFULLTHRSH_S    0
+#define QUEFULLTHRSH_M    0x1ffU
+#define QUEFULLTHRSH_G(x) (((x) >> QUEFULLTHRSH_S) & QUEFULLTHRSH_M)
+
+#define UP_IBQ_0_RDADDR_A 0x10
+#define UP_IBQ_0_SHADOW_RDADDR_A 0x280
+#define UP_OBQ_0_REALADDR_A 0x104
+#define UP_OBQ_0_SHADOW_REALADDR_A 0x394
+
+#define IBQRDADDR_S    0
+#define IBQRDADDR_M    0x1fffU
+#define IBQRDADDR_G(x) (((x) >> IBQRDADDR_S) & IBQRDADDR_M)
+
+#define IBQWRADDR_S    0
+#define IBQWRADDR_M    0x1fffU
+#define IBQWRADDR_G(x) (((x) >> IBQWRADDR_S) & IBQWRADDR_M)
+
+#define QUERDADDR_S    0
+#define QUERDADDR_M    0x7fffU
+#define QUERDADDR_G(x) (((x) >> QUERDADDR_S) & QUERDADDR_M)
+
+#define QUEREMFLITS_S    0
+#define QUEREMFLITS_M    0x7ffU
+#define QUEREMFLITS_G(x) (((x) >> QUEREMFLITS_S) & QUEREMFLITS_M)
+
+#define QUEEOPCNT_S    16
+#define QUEEOPCNT_M    0xfffU
+#define QUEEOPCNT_G(x) (((x) >> QUEEOPCNT_S) & QUEEOPCNT_M)
+
+#define QUESOPCNT_S    0
+#define QUESOPCNT_M    0xfffU
+#define QUESOPCNT_G(x) (((x) >> QUESOPCNT_S) & QUESOPCNT_M)
 
-#define S_FT_VNID_ID_VF                 0
-#define V_FT_VNID_ID_VF(x)              ((x) << S_FT_VNID_ID_VF)
+#define OBQSELECT_S    4
+#define OBQSELECT_V(x) ((x) << OBQSELECT_S)
+#define OBQSELECT_F    OBQSELECT_V(1U)
 
-#define S_FT_VNID_ID_PF                 7
-#define V_FT_VNID_ID_PF(x)              ((x) << S_FT_VNID_ID_PF)
+#define IBQSELECT_S    3
+#define IBQSELECT_V(x) ((x) << IBQSELECT_S)
+#define IBQSELECT_F    IBQSELECT_V(1U)
 
-#define S_FT_VNID_ID_VLD                16
-#define V_FT_VNID_ID_VLD(x)             ((x) << S_FT_VNID_ID_VLD)
+#define QUENUMSELECT_S    0
+#define QUENUMSELECT_V(x) ((x) << QUENUMSELECT_S)
 
 #endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
new file mode 100644 (file)
index 0000000..19b2dcf
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_VALUES_H__
+#define __T4_VALUES_H__
+
+/* This file contains definitions for various T4 register value hardware
+ * constants.  The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior.  For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/* SGE register field values.
+ */
+
+/* CONTROL1 register */
+#define RXPKTCPLMODE_SPLIT_X           1
+
+#define INGPCIEBOUNDARY_SHIFT_X                5
+#define INGPCIEBOUNDARY_32B_X          0
+
+#define INGPADBOUNDARY_SHIFT_X         5
+
+/* CONTROL2 register */
+#define INGPACKBOUNDARY_SHIFT_X                5
+#define INGPACKBOUNDARY_16B_X          0
+
+/* GTS register */
+#define SGE_TIMERREGS                  6
+#define TIMERREG_COUNTER0_X            0
+
+/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64.  For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively.  We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE           128
+#define SGE_UDB_KDOORBELL      8
+#define SGE_UDB_GTS            20
+#define SGE_UDB_WCDOORBELL     64
+
+/* CIM register field values.
+ */
+#define X_MBOWNER_FW                   1
+#define X_MBOWNER_PL                   2
+
+/* PCI-E definitions */
+#define WINDOW_SHIFT_X         10
+#define PCIEOFST_SHIFT_X       10
+
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters.  Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present.  These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define FT_FCOE_W                       1
+#define FT_PORT_W                       3
+#define FT_VNIC_ID_W                    17
+#define FT_VLAN_W                       17
+#define FT_TOS_W                        8
+#define FT_PROTOCOL_W                   8
+#define FT_ETHERTYPE_W                  16
+#define FT_MACMATCH_W                   9
+#define FT_MPSHITTYPE_W                 3
+#define FT_FRAGMENTATION_W              1
+
+/* Some of the Compressed Filter Tuple fields have internal structure.  These
+ * bit shifts/masks describe those structures.  All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define FT_VLAN_VLD_S                   16
+#define FT_VLAN_VLD_V(x)                ((x) << FT_VLAN_VLD_S)
+#define FT_VLAN_VLD_F                   FT_VLAN_VLD_V(1U)
+
+#define FT_VNID_ID_VF_S                 0
+#define FT_VNID_ID_VF_V(x)              ((x) << FT_VNID_ID_VF_S)
+
+#define FT_VNID_ID_PF_S                 7
+#define FT_VNID_ID_PF_V(x)              ((x) << FT_VNID_ID_PF_S)
+
+#define FT_VNID_ID_VLD_S                16
+#define FT_VNID_ID_VLD_V(x)             ((x) << FT_VNID_ID_VLD_S)
+
+#endif /* __T4_VALUES_H__ */
index 7c0aec85137a2acee4a892afc5ec14c16731a26e..95fc425375c45815b9f9b789d4124c01f3eafab2 100644 (file)
@@ -673,6 +673,7 @@ enum fw_cmd_opcodes {
        FW_RSS_IND_TBL_CMD             = 0x20,
        FW_RSS_GLB_CONFIG_CMD          = 0x22,
        FW_RSS_VI_CONFIG_CMD           = 0x23,
+       FW_DEVLOG_CMD                  = 0x25,
        FW_CLIP_CMD                    = 0x28,
        FW_LASTC2E_CMD                 = 0x40,
        FW_ERROR_CMD                   = 0x80,
@@ -1058,9 +1059,11 @@ enum fw_params_param_dev {
        FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
        FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
        FW_PARAMS_PARAM_DEV_CF = 0x0D,
+       FW_PARAMS_PARAM_DEV_DIAG = 0x11,
        FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
        FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
        FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
+       FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
 };
 
 /*
@@ -1120,6 +1123,16 @@ enum fw_params_param_dmaq {
        FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
 };
 
+enum fw_params_param_dev_diag {
+       FW_PARAM_DEV_DIAG_TMP           = 0x00,
+       FW_PARAM_DEV_DIAG_VDD           = 0x01,
+};
+
+enum fw_params_param_dev_fwcache {
+       FW_PARAM_DEV_FWCACHE_FLUSH      = 0x00,
+       FW_PARAM_DEV_FWCACHE_FLUSHINV   = 0x01,
+};
+
 #define FW_PARAMS_MNEM_S       24
 #define FW_PARAMS_MNEM_V(x)    ((x) << FW_PARAMS_MNEM_S)
 
@@ -3038,4 +3051,84 @@ enum fw_hdr_flags {
        FW_HDR_FLAGS_RESET_HALT = 0x00000001,
 };
 
+/* length of the formatting string  */
+#define FW_DEVLOG_FMT_LEN      192
+
+/* maximum number of the formatting string parameters */
+#define FW_DEVLOG_FMT_PARAMS_NUM 8
+
+/* priority levels */
+enum fw_devlog_level {
+       FW_DEVLOG_LEVEL_EMERG   = 0x0,
+       FW_DEVLOG_LEVEL_CRIT    = 0x1,
+       FW_DEVLOG_LEVEL_ERR     = 0x2,
+       FW_DEVLOG_LEVEL_NOTICE  = 0x3,
+       FW_DEVLOG_LEVEL_INFO    = 0x4,
+       FW_DEVLOG_LEVEL_DEBUG   = 0x5,
+       FW_DEVLOG_LEVEL_MAX     = 0x5,
+};
+
+/* facilities that may send a log message */
+enum fw_devlog_facility {
+       FW_DEVLOG_FACILITY_CORE         = 0x00,
+       FW_DEVLOG_FACILITY_CF           = 0x01,
+       FW_DEVLOG_FACILITY_SCHED        = 0x02,
+       FW_DEVLOG_FACILITY_TIMER        = 0x04,
+       FW_DEVLOG_FACILITY_RES          = 0x06,
+       FW_DEVLOG_FACILITY_HW           = 0x08,
+       FW_DEVLOG_FACILITY_FLR          = 0x10,
+       FW_DEVLOG_FACILITY_DMAQ         = 0x12,
+       FW_DEVLOG_FACILITY_PHY          = 0x14,
+       FW_DEVLOG_FACILITY_MAC          = 0x16,
+       FW_DEVLOG_FACILITY_PORT         = 0x18,
+       FW_DEVLOG_FACILITY_VI           = 0x1A,
+       FW_DEVLOG_FACILITY_FILTER       = 0x1C,
+       FW_DEVLOG_FACILITY_ACL          = 0x1E,
+       FW_DEVLOG_FACILITY_TM           = 0x20,
+       FW_DEVLOG_FACILITY_QFC          = 0x22,
+       FW_DEVLOG_FACILITY_DCB          = 0x24,
+       FW_DEVLOG_FACILITY_ETH          = 0x26,
+       FW_DEVLOG_FACILITY_OFLD         = 0x28,
+       FW_DEVLOG_FACILITY_RI           = 0x2A,
+       FW_DEVLOG_FACILITY_ISCSI        = 0x2C,
+       FW_DEVLOG_FACILITY_FCOE         = 0x2E,
+       FW_DEVLOG_FACILITY_FOISCSI      = 0x30,
+       FW_DEVLOG_FACILITY_FOFCOE       = 0x32,
+       FW_DEVLOG_FACILITY_MAX          = 0x32,
+};
+
+/* log message format */
+struct fw_devlog_e {
+       __be64  timestamp;
+       __be32  seqno;
+       __be16  reserved1;
+       __u8    level;
+       __u8    facility;
+       __u8    fmt[FW_DEVLOG_FMT_LEN];
+       __be32  params[FW_DEVLOG_FMT_PARAMS_NUM];
+       __be32  reserved3[4];
+};
+
+struct fw_devlog_cmd {
+       __be32 op_to_write;
+       __be32 retval_len16;
+       __u8   level;
+       __u8   r2[7];
+       __be32 memtype_devlog_memaddr16_devlog;
+       __be32 memsize_devlog;
+       __be32 r3[2];
+};
+
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S         28
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M         0xf
+#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(x)      \
+       (((x) >> FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S) & \
+        FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M)
+
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S       0
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M       0xfffffff
+#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(x)    \
+       (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
+        FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
+
 #endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
new file mode 100644 (file)
index 0000000..e2bd3f7
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4FW_VERSION_H__
+#define __T4FW_VERSION_H__
+
+#define T4FW_VERSION_MAJOR 0x01
+#define T4FW_VERSION_MINOR 0x0C
+#define T4FW_VERSION_MICRO 0x19
+#define T4FW_VERSION_BUILD 0x00
+
+#define T5FW_VERSION_MAJOR 0x01
+#define T5FW_VERSION_MINOR 0x0C
+#define T5FW_VERSION_MICRO 0x19
+#define T5FW_VERSION_BUILD 0x00
+
+#endif
index 2215d432a05958ddb25e6b0d1a4bee27562f748a..122e2964e63b757f9b73e35781fca71f2ee2ef56 100644 (file)
@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq)
         * enable interrupts.
         */
        t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                    CIDXINC(0) |
-                    SEINTARM(rspq->intr_params) |
-                    INGRESSQID(rspq->cntxt_id));
+                    CIDXINC_V(0) |
+                    SEINTARM_V(rspq->intr_params) |
+                    INGRESSQID_V(rspq->cntxt_id));
 }
 
 /*
@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter)
         */
        if (adapter->flags & USING_MSI)
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                            CIDXINC(0) |
-                            SEINTARM(s->intrq.intr_params) |
-                            INGRESSQID(s->intrq.cntxt_id));
+                            CIDXINC_V(0) |
+                            SEINTARM_V(s->intrq.intr_params) |
+                            INGRESSQID_V(s->intrq.cntxt_id));
 
 }
 
@@ -450,7 +450,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
                /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
                 */
                const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
-               opcode = G_CPL_OPCODE(ntohl(p->opcode_qid));
+               opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
                                , opcode);
@@ -471,7 +471,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
                 * free TX Queue Descriptors ...
                 */
                const struct cpl_sge_egr_update *p = cpl;
-               unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
+               unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
                struct sge *s = &adapter->sge;
                struct sge_txq *tq;
                struct sge_eth_txq *txq;
@@ -1673,7 +1673,7 @@ static void cxgb4vf_get_regs(struct net_device *dev,
        reg_block_dump(adapter, regbuf,
                       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
                       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
-                      ? A_PL_VF_WHOAMI : A_PL_VF_REVISION));
+                      ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
        reg_block_dump(adapter, regbuf,
                       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
                       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2294,26 +2294,22 @@ static int adap_init0(struct adapter *adapter)
         * threshold values from the SGE parameters.
         */
        s->timer_val[0] = core_ticks_to_us(adapter,
-               TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
+               TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
        s->timer_val[1] = core_ticks_to_us(adapter,
-               TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
+               TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
        s->timer_val[2] = core_ticks_to_us(adapter,
-               TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
+               TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
        s->timer_val[3] = core_ticks_to_us(adapter,
-               TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
+               TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
        s->timer_val[4] = core_ticks_to_us(adapter,
-               TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
+               TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
        s->timer_val[5] = core_ticks_to_us(adapter,
-               TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
+               TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
 
-       s->counter_val[0] =
-               THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
-       s->counter_val[1] =
-               THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
-       s->counter_val[2] =
-               THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
-       s->counter_val[3] =
-               THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
+       s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
+       s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
+       s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
+       s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
 
        /*
         * Grab our Virtual Interface resource allocation, extract the
@@ -2430,7 +2426,7 @@ static void cfg_queues(struct adapter *adapter)
         */
        n10g = 0;
        for_each_port(adapter, pidx)
-               n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+               n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
 
        /*
         * We default to 1 queue per non-10G port and up to # of cores queues
index f7fd1317d99675515b78dec60b7fe1b3e5a228c5..0545f0de1c52be282af53d6a5f03916f93ad7013 100644 (file)
@@ -47,6 +47,7 @@
 #include "t4vf_defs.h"
 
 #include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
 #include "../cxgb4/t4fw_api.h"
 #include "../cxgb4/t4_msg.h"
 
@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
         */
        if (fl->pend_cred >= FL_PER_EQ_UNIT) {
                if (is_t4(adapter->params.chip))
-                       val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+                       val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
                else
-                       val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) |
-                             DBTYPE(1);
-               val |= DBPRIO(1);
+                       val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
+                             DBTYPE_F;
+               val |= DBPRIO_F;
 
                /* Make sure all memory writes to the Free List queue are
                 * committed before we tell the hardware about them.
@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
                if (unlikely(fl->bar2_addr == NULL)) {
                        t4_write_reg(adapter,
                                     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
-                                    QID(fl->cntxt_id) | val);
+                                    QID_V(fl->cntxt_id) | val);
                } else {
-                       writel(val | QID(fl->bar2_qid),
+                       writel(val | QID_V(fl->bar2_qid),
                               fl->bar2_addr + SGE_UDB_KDOORBELL);
 
                        /* This Write memory Barrier will force the write to
@@ -925,7 +926,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
        }
 
        sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-                             ULPTX_NSGE(nfrags));
+                             ULPTX_NSGE_V(nfrags));
        if (likely(--nfrags == 0))
                return;
        /*
@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(tq->bar2_addr == NULL)) {
-               u32 val = PIDX(n);
+               u32 val = PIDX_V(n);
 
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
-                            QID(tq->cntxt_id) | val);
+                            QID_V(tq->cntxt_id) | val);
        } else {
-               u32 val = PIDX_T5(n);
+               u32 val = PIDX_T5_V(n);
 
                /* T4 and later chips share the same PIDX field offset within
                 * the doorbell, but T5 and later shrank the field in order to
@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                 * large in the first place (14 bits) so we just use the T5
                 * and later limits and warn if a Queue ID is too large.
                 */
-               WARN_ON(val & DBPRIO(1));
+               WARN_ON(val & DBPRIO_F);
 
                /* If we're only writing a single Egress Unit and the BAR2
                 * Queue ID is 0, we can use the Write Combining Doorbell
@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                                count--;
                        }
                } else
-                       writel(val | QID(tq->bar2_qid),
+                       writel(val | QID_V(tq->bar2_qid),
                               tq->bar2_addr + SGE_UDB_KDOORBELL);
 
                /* This Write Memory Barrier will force the write to the User
@@ -1325,9 +1326,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         * If there's a VLAN tag present, add that to the list of things to
         * do in this Work Request.
         */
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                txq->vlan_ins++;
-               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
        }
 
        /*
@@ -1603,7 +1604,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
         * If this is a good TCP packet and we have Generic Receive Offload
         * enabled, handle the packet in the GRO path.
         */
-       if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+       if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
            (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
            !pkt->ip_frag) {
                do_gro(rxq, gl, pkt);
@@ -1625,7 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        rxq->stats.pkts++;
 
        if (csum_ok && !pkt->err_vec &&
-           (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+           (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
                if (!pkt->ip_frag)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else {
@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
        if (unlikely(work_done == 0))
                rspq->unhandled_irqs++;
 
-       val = CIDXINC(work_done) | SEINTARM(intr_params);
+       val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
        if (is_t4(rspq->adapter->params.chip)) {
                t4_write_reg(rspq->adapter,
                             T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                            val | INGRESSQID((u32)rspq->cntxt_id));
+                            val | INGRESSQID_V((u32)rspq->cntxt_id));
        } else {
-               writel(val | INGRESSQID(rspq->bar2_qid),
+               writel(val | INGRESSQID_V(rspq->bar2_qid),
                       rspq->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter)
                rspq_next(intrq);
        }
 
-       val = CIDXINC(work_done) | SEINTARM(intrq->intr_params);
+       val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
        if (is_t4(adapter->params.chip))
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-                            val | INGRESSQID(intrq->cntxt_id));
+                            val | INGRESSQID_V(intrq->cntxt_id));
        else {
-               writel(val | INGRESSQID(intrq->bar2_qid),
+               writel(val | INGRESSQID_V(intrq->bar2_qid),
                       intrq->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter)
                        fl0, fl1);
                return -EINVAL;
        }
-       if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
+       if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
                dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
                return -EINVAL;
        }
@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter)
         */
        if (fl1)
                s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
-       s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+       s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
                        ? 128 : 64);
-       s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+       s->pktshift = PKTSHIFT_G(sge_params->sge_control);
 
        /* T4 uses a single control field to specify both the PCIe Padding and
         * Packing Boundary.  T5 introduced the ability to specify these
@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter)
         * end doing this because it would initialize the Padding Boundary and
         * leave the Packing Boundary initialized to 0 (16 bytes).)
         */
-       ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-                              X_INGPADBOUNDARY_SHIFT);
+       ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
+                              INGPADBOUNDARY_SHIFT_X);
        if (is_t4(adapter->params.chip)) {
                s->fl_align = ingpadboundary;
        } else {
@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter)
         * Congestion Threshold is in units of 2 Free List pointers.)
         */
        s->fl_starve_thres
-               = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
+               = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
 
        /*
         * Set up tasklet timers.
index c7b127d937671698a400b5b73187eb28c1a51c2f..b516b12b188459eabf2397f7dcdbfb17279c7a48 100644 (file)
@@ -64,8 +64,8 @@
  * Mailbox Data in the fixed CIM PF map and the programmable VF map must
  * match.  However, it's a useful convention ...
  */
-#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
-#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A!
 #endif
 
 /*
index 21dc9a20308c58dabef4b77b1fad338547e20df3..1b5506df35b15ab74eaf1ecea220da4ad6278a3f 100644 (file)
@@ -39,6 +39,7 @@
 #include "t4vf_defs.h"
 
 #include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_values.h"
 #include "../cxgb4/t4fw_api.h"
 
 /*
@@ -137,9 +138,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
         * Loop trying to get ownership of the mailbox.  Return an error
         * if we can't gain ownership.
         */
-       v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+       v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
-               v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+               v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
        if (v != MBOX_OWNER_DRV)
                return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
 
@@ -161,7 +162,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
        t4_read_reg(adapter, mbox_data);         /* flush write */
 
        t4_write_reg(adapter, mbox_ctl,
-                    MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+                    MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
        t4_read_reg(adapter, mbox_ctl);          /* flush write */
 
        /*
@@ -183,14 +184,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
                 * If we're the owner, see if this is the reply we wanted.
                 */
                v = t4_read_reg(adapter, mbox_ctl);
-               if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+               if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
                        /*
                         * If the Message Valid bit isn't on, revoke ownership
                         * of the mailbox and continue waiting for our reply.
                         */
-                       if ((v & MBMSGVALID) == 0) {
+                       if ((v & MBMSGVALID_F) == 0) {
                                t4_write_reg(adapter, mbox_ctl,
-                                            MBOWNER(MBOX_OWNER_NONE));
+                                            MBOWNER_V(MBOX_OWNER_NONE));
                                continue;
                        }
 
@@ -216,7 +217,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
                                         & FW_CMD_REQUEST_F) != 0);
                        }
                        t4_write_reg(adapter, mbox_ctl,
-                                    MBOWNER(MBOX_OWNER_NONE));
+                                    MBOWNER_V(MBOX_OWNER_NONE));
                        return -FW_CMD_RETVAL_G(v);
                }
        }
@@ -323,6 +324,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
                return v;
 
        v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+       pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
+                       FW_PORT_CMD_MDIOADDR_G(v) : -1;
        pi->port_type = FW_PORT_CMD_PTYPE_G(v);
        pi->mod_type = FW_PORT_MOD_TYPE_NA;
 
@@ -528,19 +531,19 @@ int t4vf_get_sge_params(struct adapter *adapter)
        int v;
 
        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
        params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
        params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
        params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
        params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
        params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
        params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
        v = t4vf_query_params(adapter, 7, params, vals);
        if (v)
                return v;
@@ -576,9 +579,9 @@ int t4vf_get_sge_params(struct adapter *adapter)
        }
 
        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
        params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
-                    FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL));
+                    FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
        v = t4vf_query_params(adapter, 2, params, vals);
        if (v)
                return v;
@@ -615,8 +618,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
                 * the driver can just use it.
                 */
                whoami = t4_read_reg(adapter,
-                                    T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
-               pf = SOURCEPF_GET(whoami);
+                                    T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
+               pf = SOURCEPF_G(whoami);
 
                s_hps = (HOSTPAGESIZEPF0_S +
                         (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
@@ -628,10 +631,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
                         (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
                sge_params->sge_vf_eq_qpp =
                        ((sge_params->sge_egress_queues_per_page >> s_qpp)
-                        & QUEUESPERPAGEPF0_MASK);
+                        & QUEUESPERPAGEPF0_M);
                sge_params->sge_vf_iq_qpp =
                        ((sge_params->sge_ingress_queues_per_page >> s_qpp)
-                        & QUEUESPERPAGEPF0_MASK);
+                        & QUEUESPERPAGEPF0_M);
        }
 
        return 0;
@@ -1590,7 +1593,7 @@ int t4vf_prep_adapter(struct adapter *adapter)
                break;
 
        case CHELSIO_T5:
-               chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+               chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
                break;
        }
index 7403dff8f14a4cf38090f8b32c57645d91b1770e..905ac5f5d9a60037746998d2f038ee738c04ce4f 100644 (file)
@@ -32,7 +32,8 @@ config CS89x0
          will be called cs89x0.
 
 config CS89x0_PLATFORM
-       bool "CS89x0 platform driver support"
+       bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
+       default !HAS_IOPORT_MAP
        depends on CS89x0
        help
          Say Y to compile the cs89x0 driver as a platform driver. This
index 3a12c096ea1c2943b84c2489f92a9dcc62d2ecc4..de9f7c97d916d3d8d760282e726cf49f4181a56b 100644 (file)
@@ -475,8 +475,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
                if (d)
                        dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 
-               if (ep->rx_buf[i] != NULL)
-                       kfree(ep->rx_buf[i]);
+               kfree(ep->rx_buf[i]);
        }
 
        for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
@@ -486,8 +485,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
                if (d)
                        dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
 
-               if (ep->tx_buf[i] != NULL)
-                       kfree(ep->tx_buf[i]);
+               kfree(ep->tx_buf[i]);
        }
 
        dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
index d2a103547c67273608a8d8b9a098ad801d00a5ea..84b6a2b46aec474959c69e84288386dc1d499282 100644 (file)
@@ -33,7 +33,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.1.1.67"
+#define DRV_VERSION            "2.1.1.83"
 #define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
index 0c396c1f55dc9e8290047033d1c4bf1e0bdcbb8e..28d9ca675a274f9876473bcce7e6995a14e1289e 100644 (file)
@@ -92,7 +92,7 @@ static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
 static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
 static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
 
-void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
+static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
 {
        int i;
        int intr;
index 142c9b5509ae7b84e82a684527775e0e728cbbb3..9cbe038a388ea62a6f4552e7f088dc5816dc5b5c 100644 (file)
@@ -45,6 +45,7 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 #include <net/busy_poll.h>
 #endif
+#include <linux/crash_dump.h>
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -88,7 +89,7 @@ MODULE_DEVICE_TABLE(pci, enic_id_table);
  *  coalescing timer values
  *  {rx_rate in Mbps, mapping percentage of the range}
  */
-struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
+static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
        {4000,  0},
        {4400, 10},
        {5060, 20},
@@ -105,7 +106,7 @@ struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
 /* This table helps the driver to pick different ranges for rx coalescing
  * timer depending on the link speed.
  */
-struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
        {0,  0}, /* 0  - 4  Gbps */
        {0,  3}, /* 4  - 10 Gbps */
        {3,  6}, /* 10 - 40 Gbps */
@@ -519,10 +520,10 @@ static inline void enic_queue_wq_skb(struct enic *enic,
        int loopback = 0;
        int err;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                /* VLAN tag from trunking driver */
                vlan_tag_insert = 1;
-               vlan_tag = vlan_tx_tag_get(skb);
+               vlan_tag = skb_vlan_tag_get(skb);
        } else if (enic->loop_enable) {
                vlan_tag = enic->loop_tag;
                loopback = 1;
@@ -1302,7 +1303,7 @@ static void enic_set_rx_cpu_rmap(struct enic *enic)
 #endif /* CONFIG_RFS_ACCEL */
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-int enic_busy_poll(struct napi_struct *napi)
+static int enic_busy_poll(struct napi_struct *napi)
 {
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
@@ -1371,7 +1372,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
        int err;
 
        if (!enic_poll_lock_napi(&enic->rq[rq]))
-               return work_done;
+               return budget;
        /* Service RQ
         */
 
@@ -1652,7 +1653,7 @@ static int enic_open(struct net_device *netdev)
                if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
                        netdev_err(netdev, "Unable to alloc receive buffers\n");
                        err = -ENOMEM;
-                       goto err_out_notify_unset;
+                       goto err_out_free_rq;
                }
        }
 
@@ -1685,7 +1686,9 @@ static int enic_open(struct net_device *netdev)
 
        return 0;
 
-err_out_notify_unset:
+err_out_free_rq:
+       for (i = 0; i < enic->rq_count; i++)
+               vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        enic_dev_notify_unset(enic);
 err_out_free_intr:
        enic_free_intr(enic);
@@ -2265,6 +2268,18 @@ static void enic_dev_deinit(struct enic *enic)
        enic_clear_intr_mode(enic);
 }
 
+static void enic_kdump_kernel_config(struct enic *enic)
+{
+       if (is_kdump_kernel()) {
+               dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+               enic->rq_count = 1;
+               enic->wq_count = 1;
+               enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+               enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+               enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+       }
+}
+
 static int enic_dev_init(struct enic *enic)
 {
        struct device *dev = enic_get_dev(enic);
@@ -2294,6 +2309,10 @@ static int enic_dev_init(struct enic *enic)
 
        enic_get_res_counts(enic);
 
+       /* modify resource count if we are in kdump_kernel
+        */
+       enic_kdump_kernel_config(enic);
+
        /* Set interrupt mode based on resource counts and system
         * capabilities
         */
index ef0bb58750e65b1ff6e67de2d4710c2c48d20302..c0a7813603c3d1c7f9df5bd783c395a61d83b54b 100644 (file)
@@ -36,6 +36,9 @@
 #include <linux/platform_device.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
 
 #include <asm/delay.h>
 #include <asm/irq.h>
@@ -1426,11 +1429,48 @@ dm9000_probe(struct platform_device *pdev)
        struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
        struct board_info *db;  /* Point a board information structure */
        struct net_device *ndev;
+       struct device *dev = &pdev->dev;
        const unsigned char *mac_src;
        int ret = 0;
        int iosize;
        int i;
        u32 id_val;
+       int reset_gpios;
+       enum of_gpio_flags flags;
+       struct regulator *power;
+
+       power = devm_regulator_get(dev, "vcc");
+       if (IS_ERR(power)) {
+               if (PTR_ERR(power) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dev_dbg(dev, "no regulator provided\n");
+       } else {
+               ret = regulator_enable(power);
+               if (ret != 0) {
+                       dev_err(dev,
+                               "Failed to enable power regulator: %d\n", ret);
+                       return ret;
+               }
+               dev_dbg(dev, "regulator enabled\n");
+       }
+
+       reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
+                                             &flags);
+       if (gpio_is_valid(reset_gpios)) {
+               ret = devm_gpio_request_one(dev, reset_gpios, flags,
+                                           "dm9000_reset");
+               if (ret) {
+                       dev_err(dev, "failed to request reset gpio %d: %d\n",
+                               reset_gpios, ret);
+                       return -ENODEV;
+               }
+
+               /* According to manual PWRST# Low Period Min 1ms */
+               msleep(2);
+               gpio_set_value(reset_gpios, 1);
+               /* Needs 3ms to read eeprom when PWRST is deasserted */
+               msleep(4);
+       }
 
        if (!pdata) {
                pdata = dm9000_parse_dt(&pdev->dev);
index 6aa887e0e1cbf69aebfdc22b148eac9b9f5f75f9..9beb3d34d4bad9f81ec8c11c560150d8b4640ed8 100644 (file)
@@ -904,7 +904,7 @@ static void init_registers(struct net_device *dev)
        }
 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
        i |= 0xE000;
-#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
+#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
        i |= 0x4800;
 #else
 #warning Processor architecture undefined
index a379c3e4b57f73ef3fd133bd4bb5114e29f10cb4..13d00a38a5bd60ed1e7af054f3a22b617e64317d 100644 (file)
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
                 * break out of while loop if there are no more
                 * packets waiting
                 */
-               if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
-                       napi_complete(napi);
-                       int_enable = dnet_readl(bp, INTR_ENB);
-                       int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
-                       dnet_writel(bp, int_enable, INTR_ENB);
-                       return 0;
-               }
+               if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
+                       break;
 
                cmd_word = dnet_readl(bp, RX_LEN_FIFO);
                pkt_len = cmd_word & 0xFFFF;
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
                               "size %u.\n", dev->name, pkt_len);
        }
 
-       budget -= npackets;
-
        if (npackets < budget) {
                /* We processed all packets available.  Tell NAPI it can
-                * stop polling then re-enable rx interrupts */
+                * stop polling then re-enable rx interrupts.
+                */
                napi_complete(napi);
                int_enable = dnet_readl(bp, INTR_ENB);
                int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
                dnet_writel(bp, int_enable, INTR_ENB);
-               return 0;
        }
 
-       /* There are still packets waiting */
-       return 1;
+       return npackets;
 }
 
 static irqreturn_t dnet_interrupt(int irq, void *dev_id)
index 712e7f8e1df7e820179abbc9c51665a0e315f028..27de37aa90afe12b2d564d87c9b13246160d39aa 100644 (file)
 #define OC_SUBSYS_DEVICE_ID3   0xE612
 #define OC_SUBSYS_DEVICE_ID4   0xE652
 
-static inline char *nic_name(struct pci_dev *pdev)
-{
-       switch (pdev->device) {
-       case OC_DEVICE_ID1:
-               return OC_NAME;
-       case OC_DEVICE_ID2:
-               return OC_NAME_BE;
-       case OC_DEVICE_ID3:
-       case OC_DEVICE_ID4:
-               return OC_NAME_LANCER;
-       case BE_DEVICE_ID2:
-               return BE3_NAME;
-       case OC_DEVICE_ID5:
-       case OC_DEVICE_ID6:
-               return OC_NAME_SH;
-       default:
-               return BE_NAME;
-       }
-}
-
 /* Number of bytes of an RX frame that are copied to skb->data */
 #define BE_HDR_LEN             ((u16) 64)
 /* allocate extra space to allow tunneling decapsulation without head reallocation */
@@ -243,7 +223,6 @@ struct be_tx_stats {
        u64 tx_bytes;
        u64 tx_pkts;
        u64 tx_reqs;
-       u64 tx_wrbs;
        u64 tx_compl;
        ulong tx_jiffies;
        u32 tx_stops;
@@ -266,6 +245,9 @@ struct be_tx_obj {
        /* Remember the skbs that were transmitted */
        struct sk_buff *sent_skb_list[TX_Q_LEN];
        struct be_tx_stats stats;
+       u16 pend_wrb_cnt;       /* Number of WRBs yet to be given to HW */
+       u16 last_req_wrb_cnt;   /* wrb cnt of the last req in the Q */
+       u16 last_req_hdr;       /* index of the last req's hdr-wrb */
 } ____cacheline_aligned_in_smp;
 
 /* Struct to remember the pages posted for rx frags */
@@ -379,15 +361,14 @@ enum vf_state {
        ASSIGNED = 1
 };
 
-#define BE_FLAGS_LINK_STATUS_INIT              1
-#define BE_FLAGS_SRIOV_ENABLED                 (1 << 2)
-#define BE_FLAGS_WORKER_SCHEDULED              (1 << 3)
-#define BE_FLAGS_VLAN_PROMISC                  (1 << 4)
-#define BE_FLAGS_MCAST_PROMISC                 (1 << 5)
-#define BE_FLAGS_NAPI_ENABLED                  (1 << 9)
-#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD            (1 << 11)
-#define BE_FLAGS_VXLAN_OFFLOADS                        (1 << 12)
-#define BE_FLAGS_SETUP_DONE                    (1 << 13)
+#define BE_FLAGS_LINK_STATUS_INIT              BIT(1)
+#define BE_FLAGS_SRIOV_ENABLED                 BIT(2)
+#define BE_FLAGS_WORKER_SCHEDULED              BIT(3)
+#define BE_FLAGS_NAPI_ENABLED                  BIT(6)
+#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD            BIT(7)
+#define BE_FLAGS_VXLAN_OFFLOADS                        BIT(8)
+#define BE_FLAGS_SETUP_DONE                    BIT(9)
+#define BE_FLAGS_EVT_INCOMPATIBLE_SFP          BIT(10)
 
 #define BE_UC_PMAC_COUNT                       30
 #define BE_VF_UC_PMAC_COUNT                    2
@@ -397,6 +378,8 @@ enum vf_state {
 #define LANCER_DELETE_FW_DUMP                  0x2
 
 struct phy_info {
+/* From SFF-8472 spec */
+#define SFP_VENDOR_NAME_LEN                    17
        u8 transceiver;
        u8 autoneg;
        u8 fc_autoneg;
@@ -410,6 +393,8 @@ struct phy_info {
        u32 advertising;
        u32 supported;
        u8 cable_type;
+       u8 vendor_name[SFP_VENDOR_NAME_LEN];
+       u8 vendor_pn[SFP_VENDOR_NAME_LEN];
 };
 
 struct be_resources {
@@ -467,8 +452,6 @@ struct be_adapter {
 
        struct be_drv_stats drv_stats;
        struct be_aic_obj aic_obj[MAX_EVT_QS];
-       u16 vlans_added;
-       unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
        u8 vlan_prio_bmap;      /* Available Priority BitMap */
        u16 recommended_prio;   /* Recommended Priority */
        struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -484,8 +467,15 @@ struct be_adapter {
        /* Ethtool knobs and info */
        char fw_ver[FW_VER_LEN];
        char fw_on_flash[FW_VER_LEN];
+
+       /* IFACE filtering fields */
        int if_handle;          /* Used to configure filtering */
+       u32 if_flags;           /* Interface filtering flags */
        u32 *pmac_id;           /* MAC addr handle used by BE card */
+       u32 uc_macs;            /* Count of secondary UC MAC programmed */
+       unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
+       u16 vlans_added;
+
        u32 beacon_state;       /* for set_phys_id */
 
        bool eeh_error;
@@ -493,7 +483,7 @@ struct be_adapter {
        bool hw_error;
 
        u32 port_num;
-       bool promiscuous;
+       char port_name;
        u8 mc_type;
        u32 function_mode;
        u32 function_caps;
@@ -526,7 +516,6 @@ struct be_adapter {
        struct phy_info phy;
        u8 wol_cap;
        bool wol_en;
-       u32 uc_macs;            /* Count of secondary UC MAC programmed */
        u16 asic_rev;
        u16 qnq_vid;
        u32 msg_enable;
@@ -732,19 +721,6 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
        return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
 }
 
-static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
-{
-       u32 addr;
-
-       addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
-
-       mac[5] = (u8)(addr & 0xFF);
-       mac[4] = (u8)((addr >> 8) & 0xFF);
-       mac[3] = (u8)((addr >> 16) & 0xFF);
-       /* Use the OUI from the current MAC address */
-       memcpy(mac, adapter->netdev->dev_addr, 3);
-}
-
 static inline bool be_multi_rxq(const struct be_adapter *adapter)
 {
        return adapter->num_rx_qs > 1;
@@ -767,129 +743,6 @@ static inline void  be_clear_all_error(struct be_adapter *adapter)
        adapter->fw_timeout = false;
 }
 
-static inline bool be_is_wol_excluded(struct be_adapter *adapter)
-{
-       struct pci_dev *pdev = adapter->pdev;
-
-       if (!be_physfn(adapter))
-               return true;
-
-       switch (pdev->subsystem_device) {
-       case OC_SUBSYS_DEVICE_ID1:
-       case OC_SUBSYS_DEVICE_ID2:
-       case OC_SUBSYS_DEVICE_ID3:
-       case OC_SUBSYS_DEVICE_ID4:
-               return true;
-       default:
-               return false;
-       }
-}
-
-static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
-{
-       return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
-}
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
-       bool status = true;
-
-       spin_lock(&eqo->lock); /* BH is already disabled */
-       if (eqo->state & BE_EQ_LOCKED) {
-               WARN_ON(eqo->state & BE_EQ_NAPI);
-               eqo->state |= BE_EQ_NAPI_YIELD;
-               status = false;
-       } else {
-               eqo->state = BE_EQ_NAPI;
-       }
-       spin_unlock(&eqo->lock);
-       return status;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-       spin_lock(&eqo->lock); /* BH is already disabled */
-
-       WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
-       eqo->state = BE_EQ_IDLE;
-
-       spin_unlock(&eqo->lock);
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
-       bool status = true;
-
-       spin_lock_bh(&eqo->lock);
-       if (eqo->state & BE_EQ_LOCKED) {
-               eqo->state |= BE_EQ_POLL_YIELD;
-               status = false;
-       } else {
-               eqo->state |= BE_EQ_POLL;
-       }
-       spin_unlock_bh(&eqo->lock);
-       return status;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-       spin_lock_bh(&eqo->lock);
-
-       WARN_ON(eqo->state & (BE_EQ_NAPI));
-       eqo->state = BE_EQ_IDLE;
-
-       spin_unlock_bh(&eqo->lock);
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-       spin_lock_init(&eqo->lock);
-       eqo->state = BE_EQ_IDLE;
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-       local_bh_disable();
-
-       /* It's enough to just acquire napi lock on the eqo to stop
-        * be_busy_poll() from processing any queueus.
-        */
-       while (!be_lock_napi(eqo))
-               mdelay(1);
-
-       local_bh_enable();
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
-       return true;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
-       return false;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
                  u16 num_popped);
 void be_link_status_update(struct be_adapter *adapter, u8 link_status);
@@ -898,16 +751,6 @@ int be_load_fw(struct be_adapter *adapter, u8 *func);
 bool be_is_wol_supported(struct be_adapter *adapter);
 bool be_pause_supported(struct be_adapter *adapter);
 u32 be_get_fw_log_level(struct be_adapter *adapter);
-
-static inline int fw_major_num(const char *fw_ver)
-{
-       int fw_major = 0;
-
-       sscanf(fw_ver, "%d.", &fw_major);
-
-       return fw_major;
-}
-
 int be_update_queues(struct be_adapter *adapter);
 int be_poll(struct napi_struct *napi, int budget);
 
index fead5c65a4f00fe525464bb647442fcd025484d5..36916cfa70f9a7d31f36b7d80135e61f854a499a 100644 (file)
 #include "be.h"
 #include "be_cmds.h"
 
+static char *be_port_misconfig_evt_desc[] = {
+       "A valid SFP module detected",
+       "Optics faulted/ incorrectly installed/ not installed.",
+       "Optics of two types installed.",
+       "Incompatible optics.",
+       "Unknown port SFP status"
+};
+
+static char *be_port_misconfig_remedy_desc[] = {
+       "",
+       "Reseat optics. If issue not resolved, replace",
+       "Remove one optic or install matching pair of optics",
+       "Replace with compatible optics for card to function",
+       ""
+};
+
 static struct be_cmd_priv_map cmd_priv_map[] = {
        {
                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
@@ -249,6 +265,29 @@ static void be_async_link_state_process(struct be_adapter *adapter,
                                      evt->port_link_status & LINK_STATUS_MASK);
 }
 
+static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
+                                                 struct be_mcc_compl *compl)
+{
+       struct be_async_event_misconfig_port *evt =
+                       (struct be_async_event_misconfig_port *)compl;
+       u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
+       struct device *dev = &adapter->pdev->dev;
+       u8 port_misconfig_evt;
+
+       port_misconfig_evt =
+               ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);
+
+       /* Log an error message that would allow a user to determine
+        * whether the SFPs have an issue
+        */
+       dev_info(dev, "Port %c: %s %s", adapter->port_name,
+                be_port_misconfig_evt_desc[port_misconfig_evt],
+                be_port_misconfig_remedy_desc[port_misconfig_evt]);
+
+       if (port_misconfig_evt == INCOMPATIBLE_SFP)
+               adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+}
+
 /* Grp5 CoS Priority evt */
 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
                                               struct be_mcc_compl *compl)
@@ -334,6 +373,16 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
        }
 }
 
+static void be_async_sliport_evt_process(struct be_adapter *adapter,
+                                        struct be_mcc_compl *cmp)
+{
+       u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+                       ASYNC_EVENT_TYPE_MASK;
+
+       if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
+               be_async_port_misconfig_event_process(adapter, cmp);
+}
+
 static inline bool is_link_state_evt(u32 flags)
 {
        return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
@@ -352,6 +401,12 @@ static inline bool is_dbg_evt(u32 flags)
                        ASYNC_EVENT_CODE_QNQ;
 }
 
+static inline bool is_sliport_evt(u32 flags)
+{
+       return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+               ASYNC_EVENT_CODE_SLIPORT;
+}
+
 static void be_mcc_event_process(struct be_adapter *adapter,
                                 struct be_mcc_compl *compl)
 {
@@ -361,6 +416,8 @@ static void be_mcc_event_process(struct be_adapter *adapter,
                be_async_grp5_evt_process(adapter, compl);
        else if (is_dbg_evt(compl->flags))
                be_async_dbg_evt_process(adapter, compl);
+       else if (is_sliport_evt(compl->flags))
+               be_async_sliport_evt_process(adapter, compl);
 }
 
 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -573,7 +630,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
 {
 #define SLIPORT_READY_TIMEOUT 30
        u32 sliport_status;
-       int status = 0, i;
+       int i;
 
        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
@@ -584,9 +641,9 @@ static int lancer_wait_ready(struct be_adapter *adapter)
        }
 
        if (i == SLIPORT_READY_TIMEOUT)
-               status = -1;
+               return sliport_status ? : -1;
 
-       return status;
+       return 0;
 }
 
 static bool lancer_provisioning_error(struct be_adapter *adapter)
@@ -624,7 +681,7 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                  adapter->db + SLIPORT_CONTROL_OFFSET);
 
-                       /* check adapter has corrected the error */
+                       /* check if adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                  SLIPORT_STATUS_OFFSET);
@@ -655,7 +712,11 @@ int be_fw_wait_ready(struct be_adapter *adapter)
 
        if (lancer_chip(adapter)) {
                status = lancer_wait_ready(adapter);
-               return status;
+               if (status) {
+                       stage = status;
+                       goto err;
+               }
+               return 0;
        }
 
        do {
@@ -671,7 +732,8 @@ int be_fw_wait_ready(struct be_adapter *adapter)
                timeout += 2;
        } while (timeout < 60);
 
-       dev_err(dev, "POST timeout; stage=0x%x\n", stage);
+err:
+       dev_err(dev, "POST timeout; stage=%#x\n", stage);
        return -1;
 }
 
@@ -1166,9 +1228,15 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
                              ctxt, 1);
        }
 
-       /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
-       req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
-       req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
+       /* Subscribe to Link State, Sliport Event and Group 5 Events
+        * (bits 1, 5 and 17 set)
+        */
+       req->async_event_bitmap[0] =
+                       cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
+                                   BIT(ASYNC_EVENT_CODE_GRP_5) |
+                                   BIT(ASYNC_EVENT_CODE_QNQ) |
+                                   BIT(ASYNC_EVENT_CODE_SLIPORT));
+
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -1881,7 +1949,7 @@ err:
        return status;
 }
 
-int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 {
        struct be_mcc_wrb *wrb;
        struct be_dma_mem *mem = &adapter->rx_filter;
@@ -1901,31 +1969,13 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
                               wrb, mem);
 
        req->if_id = cpu_to_le32(adapter->if_handle);
-       if (flags & IFF_PROMISC) {
-               req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-                                                BE_IF_FLAGS_VLAN_PROMISCUOUS |
-                                                BE_IF_FLAGS_MCAST_PROMISCUOUS);
-               if (value == ON)
-                       req->if_flags =
-                               cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-                                           BE_IF_FLAGS_VLAN_PROMISCUOUS |
-                                           BE_IF_FLAGS_MCAST_PROMISCUOUS);
-       } else if (flags & IFF_ALLMULTI) {
-               req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
-               req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
-       } else if (flags & BE_FLAGS_VLAN_PROMISC) {
-               req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
-
-               if (value == ON)
-                       req->if_flags =
-                               cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
-       } else {
+       req->if_flags_mask = cpu_to_le32(flags);
+       req->if_flags = (value == ON) ? req->if_flags_mask : 0;
+
+       if (flags & BE_IF_FLAGS_MULTICAST) {
                struct netdev_hw_addr *ha;
                int i = 0;
 
-               req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
-               req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
-
                /* Reset mcast promisc mode if already set by setting mask
                 * and not setting flags field
                 */
@@ -1937,24 +1987,26 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
                        memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
        }
 
-       if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
-           req->if_flags_mask) {
-               dev_warn(&adapter->pdev->dev,
-                        "Cannot set rx filter flags 0x%x\n",
-                        req->if_flags_mask);
-               dev_warn(&adapter->pdev->dev,
-                        "Interface is capable of 0x%x flags only\n",
-                        be_if_cap_flags(adapter));
-       }
-       req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
-
        status = be_mcc_notify_wait(adapter);
-
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
 
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+{
+       struct device *dev = &adapter->pdev->dev;
+
+       if ((flags & be_if_cap_flags(adapter)) != flags) {
+               dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
+               dev_warn(dev, "Interface is capable of 0x%x flags only\n",
+                        be_if_cap_flags(adapter));
+       }
+       flags &= be_if_cap_flags(adapter);
+
+       return __be_cmd_rx_filter(adapter, flags, value);
+}
+
 /* Uses synchrounous mcc */
 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
 {
@@ -2355,6 +2407,24 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
        return status;
 }
 
+int be_cmd_query_sfp_info(struct be_adapter *adapter)
+{
+       u8 page_data[PAGE_DATA_LEN];
+       int status;
+
+       status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+                                                  page_data);
+       if (!status) {
+               strlcpy(adapter->phy.vendor_name, page_data +
+                       SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
+               strlcpy(adapter->phy.vendor_pn,
+                       page_data + SFP_VENDOR_PN_OFFSET,
+                       SFP_VENDOR_NAME_LEN - 1);
+       }
+
+       return status;
+}
+
 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
 {
        struct lancer_cmd_req_delete_object *req;
@@ -2431,7 +2501,8 @@ err_unlock:
 }
 
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
-                         u32 flash_type, u32 flash_opcode, u32 buf_size)
+                         u32 flash_type, u32 flash_opcode, u32 img_offset,
+                         u32 buf_size)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
@@ -2452,6 +2523,9 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                               cmd);
 
        req->params.op_type = cpu_to_le32(flash_type);
+       if (flash_type == OPTYPE_OFFSET_SPECIFIED)
+               req->params.offset = cpu_to_le32(img_offset);
+
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);
 
@@ -2472,10 +2546,10 @@ err_unlock:
 }
 
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-                        u16 optype, int offset)
+                        u16 img_optype, u32 img_offset, u32 crc_offset)
 {
-       struct be_mcc_wrb *wrb;
        struct be_cmd_read_flash_crc *req;
+       struct be_mcc_wrb *wrb;
        int status;
 
        spin_lock_bh(&adapter->mcc_lock);
@@ -2491,9 +2565,13 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
                               OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
                               wrb, NULL);
 
-       req->params.op_type = cpu_to_le32(optype);
+       req->params.op_type = cpu_to_le32(img_optype);
+       if (img_optype == OPTYPE_OFFSET_SPECIFIED)
+               req->params.offset = cpu_to_le32(img_offset + crc_offset);
+       else
+               req->params.offset = cpu_to_le32(crc_offset);
+
        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
-       req->params.offset = cpu_to_le32(offset);
        req->params.data_buf_size = cpu_to_le32(0x4);
 
        status = be_mcc_notify_wait(adapter);
@@ -2742,7 +2820,7 @@ err:
        return status;
 }
 
-int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_qos *req;
@@ -3236,6 +3314,24 @@ err:
        return status;
 }
 
+static bool be_is_wol_excluded(struct be_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+
+       if (!be_physfn(adapter))
+               return true;
+
+       switch (pdev->subsystem_device) {
+       case OC_SUBSYS_DEVICE_ID1:
+       case OC_SUBSYS_DEVICE_ID2:
+       case OC_SUBSYS_DEVICE_ID3:
+       case OC_SUBSYS_DEVICE_ID4:
+               return true;
+       default:
+               return false;
+       }
+}
+
 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 {
        struct be_mcc_wrb *wrb;
@@ -3422,42 +3518,34 @@ err:
        return status;
 }
 
-int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
+int be_cmd_query_port_name(struct be_adapter *adapter)
 {
-       struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_port_name *req;
+       struct be_mcc_wrb *wrb;
        int status;
 
-       if (!lancer_chip(adapter)) {
-               *port_name = adapter->hba_port_num + '0';
-               return 0;
-       }
-
-       spin_lock_bh(&adapter->mcc_lock);
-
-       wrb = wrb_from_mccq(adapter);
-       if (!wrb) {
-               status = -EBUSY;
-               goto err;
-       }
+       if (mutex_lock_interruptible(&adapter->mbox_lock))
+               return -1;
 
+       wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
                               NULL);
-       req->hdr.version = 1;
+       if (!BEx_chip(adapter))
+               req->hdr.version = 1;
 
-       status = be_mcc_notify_wait(adapter);
+       status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
 
-               *port_name = resp->port_name[adapter->hba_port_num];
+               adapter->port_name = resp->port_name[adapter->hba_port_num];
        } else {
-               *port_name = adapter->hba_port_num + '0';
+               adapter->port_name = adapter->hba_port_num + '0';
        }
-err:
-       spin_unlock_bh(&adapter->mcc_lock);
+
+       mutex_unlock(&adapter->mbox_lock);
        return status;
 }
 
@@ -3751,6 +3839,7 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
        be_reset_nic_desc(&nic_desc);
        nic_desc.pf_num = adapter->pf_number;
        nic_desc.vf_num = domain;
+       nic_desc.bw_min = 0;
        if (lancer_chip(adapter)) {
                nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
                nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
@@ -4092,7 +4181,7 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
        int status;
 
        if (BEx_chip(adapter) || lancer_chip(adapter))
-               return 0;
+               return -EOPNOTSUPP;
 
        spin_lock_bh(&adapter->mcc_lock);
 
index eb5085d6794fde8b4635d82bd0427d0fde084844..db761e8e42a3224486ced01238f9fb6b61ad3579 100644 (file)
@@ -44,10 +44,10 @@ struct be_mcc_wrb {
        } payload;
 };
 
-#define CQE_FLAGS_VALID_MASK           (1 << 31)
-#define CQE_FLAGS_ASYNC_MASK           (1 << 30)
-#define CQE_FLAGS_COMPLETED_MASK       (1 << 28)
-#define CQE_FLAGS_CONSUMED_MASK        (1 << 27)
+#define CQE_FLAGS_VALID_MASK           BIT(31)
+#define CQE_FLAGS_ASYNC_MASK           BIT(30)
+#define CQE_FLAGS_COMPLETED_MASK       BIT(28)
+#define CQE_FLAGS_CONSUMED_MASK                BIT(27)
 
 /* Completion Status */
 enum mcc_base_status {
@@ -102,6 +102,8 @@ struct be_mcc_compl {
 #define ASYNC_EVENT_PVID_STATE         0x3
 #define ASYNC_EVENT_CODE_QNQ           0x6
 #define ASYNC_DEBUG_EVENT_TYPE_QNQ     1
+#define ASYNC_EVENT_CODE_SLIPORT       0x11
+#define ASYNC_EVENT_PORT_MISCONFIG     0x9
 
 enum {
        LINK_DOWN       = 0x0,
@@ -169,6 +171,15 @@ struct be_async_event_qnq {
        u32 flags;
 } __packed;
 
+#define INCOMPATIBLE_SFP               0x3
+/* async event indicating misconfigured port */
+struct be_async_event_misconfig_port {
+       u32 event_data_word1;
+       u32 event_data_word2;
+       u32 rsvd0;
+       u32 flags;
+} __packed;
+
 struct be_mcc_mailbox {
        struct be_mcc_wrb wrb;
        struct be_mcc_compl compl;
@@ -586,6 +597,10 @@ enum be_if_flags {
                         BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
                         BE_IF_FLAGS_UNTAGGED)
 
+#define BE_IF_FLAGS_ALL_PROMISCUOUS    (BE_IF_FLAGS_PROMISCUOUS | \
+                                        BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+                                        BE_IF_FLAGS_MCAST_PROMISCUOUS)
+
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
 struct be_cmd_req_if_create {
@@ -1024,6 +1039,8 @@ enum {
 #define        SFP_PLUS_SFF_8472_COMP          0x5E
 #define        SFP_PLUS_CABLE_TYPE_OFFSET      0x8
 #define        SFP_PLUS_COPPER_CABLE           0x4
+#define SFP_VENDOR_NAME_OFFSET         0x14
+#define SFP_VENDOR_PN_OFFSET           0x28
 
 #define PAGE_DATA_LEN   256
 struct be_cmd_resp_port_type {
@@ -1091,6 +1108,10 @@ struct be_cmd_req_query_fw_cfg {
        u32 rsvd[31];
 };
 
+/* ASIC revisions */
+#define ASIC_REV_B0            0x10
+#define ASIC_REV_P2            0x11
+
 struct be_cmd_resp_query_fw_cfg {
        struct be_cmd_resp_hdr hdr;
        u32 be_config_number;
@@ -1161,7 +1182,173 @@ struct be_cmd_resp_get_beacon_state {
        u8 rsvd0[3];
 } __packed;
 
+/* Flashrom related descriptors */
+#define MAX_FLASH_COMP                 32
+
+#define OPTYPE_ISCSI_ACTIVE            0
+#define OPTYPE_REDBOOT                 1
+#define OPTYPE_BIOS                    2
+#define OPTYPE_PXE_BIOS                        3
+#define OPTYPE_OFFSET_SPECIFIED                7
+#define OPTYPE_FCOE_BIOS               8
+#define OPTYPE_ISCSI_BACKUP            9
+#define OPTYPE_FCOE_FW_ACTIVE          10
+#define OPTYPE_FCOE_FW_BACKUP          11
+#define OPTYPE_NCSI_FW                 13
+#define OPTYPE_REDBOOT_DIR             18
+#define OPTYPE_REDBOOT_CONFIG          19
+#define OPTYPE_SH_PHY_FW               21
+#define OPTYPE_FLASHISM_JUMPVECTOR     22
+#define OPTYPE_UFI_DIR                 23
+#define OPTYPE_PHY_FW                  99
+
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2   262144  /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2        262144  /* Max Redboot image sz    */
+#define FLASH_IMAGE_MAX_SIZE_g2                1310720 /* Max firmware image size */
+
+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3   262144
+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3   524288  /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3        1048576 /* Max Redboot image sz    */
+#define FLASH_IMAGE_MAX_SIZE_g3                2097152 /* Max firmware image size */
+
+/* Offsets for components on Flash. */
+#define FLASH_REDBOOT_START_g2                 0
+#define FLASH_FCoE_BIOS_START_g2               524288
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2     1048576
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2      2359296
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2      3670016
+#define FLASH_FCoE_BACKUP_IMAGE_START_g2       4980736
+#define FLASH_iSCSI_BIOS_START_g2              7340032
+#define FLASH_PXE_BIOS_START_g2                        7864320
+
+#define FLASH_REDBOOT_START_g3                 262144
+#define FLASH_PHY_FW_START_g3                  1310720
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3     2097152
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3      4194304
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3      6291456
+#define FLASH_FCoE_BACKUP_IMAGE_START_g3       8388608
+#define FLASH_iSCSI_BIOS_START_g3              12582912
+#define FLASH_PXE_BIOS_START_g3                        13107200
+#define FLASH_FCoE_BIOS_START_g3               13631488
+#define FLASH_NCSI_START_g3                    15990784
+
+#define IMAGE_NCSI                     16
+#define IMAGE_OPTION_ROM_PXE           32
+#define IMAGE_OPTION_ROM_FCoE          33
+#define IMAGE_OPTION_ROM_ISCSI         34
+#define IMAGE_FLASHISM_JUMPVECTOR      48
+#define IMAGE_FIRMWARE_iSCSI           160
+#define IMAGE_FIRMWARE_FCoE            162
+#define IMAGE_FIRMWARE_BACKUP_iSCSI    176
+#define IMAGE_FIRMWARE_BACKUP_FCoE     178
+#define IMAGE_FIRMWARE_PHY             192
+#define IMAGE_REDBOOT_DIR              208
+#define IMAGE_REDBOOT_CONFIG           209
+#define IMAGE_UFI_DIR                  210
+#define IMAGE_BOOT_CODE                        224
+
+struct controller_id {
+       u32 vendor;
+       u32 device;
+       u32 subvendor;
+       u32 subdevice;
+};
+
+struct flash_comp {
+       unsigned long offset;
+       int optype;
+       int size;
+       int img_type;
+};
+
+struct image_hdr {
+       u32 imageid;
+       u32 imageoffset;
+       u32 imagelength;
+       u32 image_checksum;
+       u8 image_version[32];
+};
+
+struct flash_file_hdr_g2 {
+       u8 sign[32];
+       u32 cksum;
+       u32 antidote;
+       struct controller_id cont_id;
+       u32 file_len;
+       u32 chunk_num;
+       u32 total_chunks;
+       u32 num_imgs;
+       u8 build[24];
+};
+
+/* First letter of the build version of the image */
+#define BLD_STR_UFI_TYPE_BE2   '2'
+#define BLD_STR_UFI_TYPE_BE3   '3'
+#define BLD_STR_UFI_TYPE_SH    '4'
+
+struct flash_file_hdr_g3 {
+       u8 sign[52];
+       u8 ufi_version[4];
+       u32 file_len;
+       u32 cksum;
+       u32 antidote;
+       u32 num_imgs;
+       u8 build[24];
+       u8 asic_type_rev;
+       u8 rsvd[31];
+};
+
+struct flash_section_hdr {
+       u32 format_rev;
+       u32 cksum;
+       u32 antidote;
+       u32 num_images;
+       u8 id_string[128];
+       u32 rsvd[4];
+} __packed;
+
+struct flash_section_hdr_g2 {
+       u32 format_rev;
+       u32 cksum;
+       u32 antidote;
+       u32 build_num;
+       u8 id_string[128];
+       u32 rsvd[8];
+} __packed;
+
+struct flash_section_entry {
+       u32 type;
+       u32 offset;
+       u32 pad_size;
+       u32 image_size;
+       u32 cksum;
+       u32 entry_point;
+       u16 optype;
+       u16 rsvd0;
+       u32 rsvd1;
+       u8 ver_data[32];
+} __packed;
+
+struct flash_section_info {
+       u8 cookie[32];
+       struct flash_section_hdr fsec_hdr;
+       struct flash_section_entry fsec_entry[32];
+} __packed;
+
+struct flash_section_info_g2 {
+       u8 cookie[32];
+       struct flash_section_hdr_g2 fsec_hdr;
+       struct flash_section_entry fsec_entry[32];
+} __packed;
+
 /****************** Firmware Flash ******************/
+#define FLASHROM_OPER_FLASH            1
+#define FLASHROM_OPER_SAVE             2
+#define FLASHROM_OPER_REPORT           4
+#define FLASHROM_OPER_PHY_FLASH                9
+#define FLASHROM_OPER_PHY_SAVE         10
+
 struct flashrom_params {
        u32 op_code;
        u32 op_type;
@@ -1366,6 +1553,7 @@ enum {
        PHY_TYPE_QSFP,
        PHY_TYPE_KR4_40GB,
        PHY_TYPE_KR2_20GB,
+       PHY_TYPE_TN_8022,
        PHY_TYPE_DISABLED = 255
 };
 
@@ -1429,6 +1617,20 @@ struct be_cmd_req_set_qos {
 };
 
 /*********************** Controller Attributes ***********************/
+struct mgmt_hba_attribs {
+       u32 rsvd0[24];
+       u8 controller_model_number[32];
+       u32 rsvd1[79];
+       u8 rsvd2[3];
+       u8 phy_port;
+       u32 rsvd3[13];
+} __packed;
+
+struct mgmt_controller_attrib {
+       struct mgmt_hba_attribs hba_attribs;
+       u32 rsvd0[10];
+} __packed;
+
 struct be_cmd_req_cntl_attribs {
        struct be_cmd_req_hdr hdr;
 };
@@ -2070,8 +2272,10 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
                                      u8 page_num, u8 *data);
 int be_cmd_query_cable_type(struct be_adapter *adapter);
+int be_cmd_query_sfp_info(struct be_adapter *adapter);
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
-                         u32 flash_oper, u32 flash_opcode, u32 buf_size);
+                         u32 flash_oper, u32 flash_opcode, u32 img_offset,
+                         u32 buf_size);
 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                            u32 data_size, u32 data_offset,
                            const char *obj_name, u32 *data_written,
@@ -2081,7 +2285,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                           u32 *data_read, u32 *eof, u8 *addn_status);
 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-                         u16 optype, int offset);
+                        u16 img_optype, u32 img_offset, u32 crc_offset);
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
                            struct be_dma_mem *nonemb_cmd);
 int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2136,7 +2340,7 @@ int lancer_initiate_dump(struct be_adapter *adapter);
 int lancer_delete_dump(struct be_adapter *adapter);
 bool dump_present(struct be_adapter *adapter);
 int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
-int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+int be_cmd_query_port_name(struct be_adapter *adapter);
 int be_cmd_get_func_config(struct be_adapter *adapter,
                           struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
index 73a500ccbf69592d0de09b0cc17f4db82e925664..4d2de47007692a85e1477da07b98402b489bc312 100644 (file)
@@ -193,8 +193,6 @@ static const struct be_ethtool_stat et_tx_stats[] = {
        {DRVSTAT_TX_INFO(tx_pkts)},
        /* Number of skbs queued for trasmission by the driver */
        {DRVSTAT_TX_INFO(tx_reqs)},
-       /* Number of TX work request blocks DMAed to HW */
-       {DRVSTAT_TX_INFO(tx_wrbs)},
        /* Number of times the TX queue was stopped due to lack
         * of spaces in the TXQ.
         */
@@ -707,15 +705,17 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
 
        if (ecmd->autoneg != adapter->phy.fc_autoneg)
                return -EINVAL;
-       adapter->tx_fc = ecmd->tx_pause;
-       adapter->rx_fc = ecmd->rx_pause;
 
-       status = be_cmd_set_flow_control(adapter,
-                                        adapter->tx_fc, adapter->rx_fc);
-       if (status)
+       status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
+                                        ecmd->rx_pause);
+       if (status) {
                dev_warn(&adapter->pdev->dev, "Pause param set failed\n");
+               return be_cmd_status(status);
+       }
 
-       return be_cmd_status(status);
+       adapter->tx_fc = ecmd->tx_pause;
+       adapter->rx_fc = ecmd->rx_pause;
+       return 0;
 }
 
 static int be_set_phys_id(struct net_device *netdev,
index 295ee0835ba0bb32979d9923fd2db75f030e55a6..48840889db6226325bb0f05de610842b255efc82 100644 (file)
@@ -75,7 +75,7 @@
  * atomically without having to arbitrate for the PCI Interrupt Disable bit
  * with the OS.
  */
-#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK     (1 << 29) /* bit 29 */
+#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK     BIT(29) /* bit 29 */
 
 /********* PCI Function Capability *********/
 #define BE_FUNCTION_CAPS_RSS                   0x2
 #define RETRIEVE_FAT   0
 #define QUERY_FAT      1
 
-/* Flashrom related descriptors */
-#define MAX_FLASH_COMP                 32
-#define IMAGE_TYPE_FIRMWARE            160
-#define IMAGE_TYPE_BOOTCODE            224
-#define IMAGE_TYPE_OPTIONROM           32
-
-#define NUM_FLASHDIR_ENTRIES           32
-
-#define OPTYPE_ISCSI_ACTIVE            0
-#define OPTYPE_REDBOOT                 1
-#define OPTYPE_BIOS                    2
-#define OPTYPE_PXE_BIOS                        3
-#define OPTYPE_FCOE_BIOS               8
-#define OPTYPE_ISCSI_BACKUP            9
-#define OPTYPE_FCOE_FW_ACTIVE          10
-#define OPTYPE_FCOE_FW_BACKUP          11
-#define OPTYPE_NCSI_FW                 13
-#define OPTYPE_REDBOOT_DIR             18
-#define OPTYPE_REDBOOT_CONFIG          19
-#define OPTYPE_SH_PHY_FW               21
-#define OPTYPE_FLASHISM_JUMPVECTOR     22
-#define OPTYPE_UFI_DIR                 23
-#define OPTYPE_PHY_FW                  99
-#define TN_8022                                13
-
-#define FLASHROM_OPER_PHY_FLASH                9
-#define FLASHROM_OPER_PHY_SAVE         10
-#define FLASHROM_OPER_FLASH            1
-#define FLASHROM_OPER_SAVE             2
-#define FLASHROM_OPER_REPORT           4
-
-#define FLASH_IMAGE_MAX_SIZE_g2                (1310720) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g2   (262144)  /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2        (262144)  /* Max Redboot image sz    */
-#define FLASH_IMAGE_MAX_SIZE_g3                (2097152) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g3   (524288)  /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3        (1048576)  /* Max Redboot image sz    */
-#define FLASH_NCSI_IMAGE_MAX_SIZE_g3   (262144)
-#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
-
-#define FLASH_NCSI_MAGIC               (0x16032009)
-#define FLASH_NCSI_DISABLED            (0)
-#define FLASH_NCSI_ENABLED             (1)
-
-#define FLASH_NCSI_BITFILE_HDR_OFFSET  (0x600000)
-
-/* Offsets for components on Flash. */
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g2  (2359296)
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g2  (3670016)
-#define FLASH_FCoE_BACKUP_IMAGE_START_g2   (4980736)
-#define FLASH_iSCSI_BIOS_START_g2          (7340032)
-#define FLASH_PXE_BIOS_START_g2            (7864320)
-#define FLASH_FCoE_BIOS_START_g2           (524288)
-#define FLASH_REDBOOT_START_g2           (0)
-
-#define FLASH_NCSI_START_g3               (15990784)
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g3  (4194304)
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g3  (6291456)
-#define FLASH_FCoE_BACKUP_IMAGE_START_g3   (8388608)
-#define FLASH_iSCSI_BIOS_START_g3          (12582912)
-#define FLASH_PXE_BIOS_START_g3            (13107200)
-#define FLASH_FCoE_BIOS_START_g3           (13631488)
-#define FLASH_REDBOOT_START_g3             (262144)
-#define FLASH_PHY_FW_START_g3             1310720
-
-#define IMAGE_NCSI                     16
-#define IMAGE_OPTION_ROM_PXE           32
-#define IMAGE_OPTION_ROM_FCoE          33
-#define IMAGE_OPTION_ROM_ISCSI         34
-#define IMAGE_FLASHISM_JUMPVECTOR      48
-#define IMAGE_FLASH_ISM                        49
-#define IMAGE_JUMP_VECTOR              50
-#define IMAGE_FIRMWARE_iSCSI           160
-#define IMAGE_FIRMWARE_COMP_iSCSI      161
-#define IMAGE_FIRMWARE_FCoE            162
-#define IMAGE_FIRMWARE_COMP_FCoE       163
-#define IMAGE_FIRMWARE_BACKUP_iSCSI    176
-#define IMAGE_FIRMWARE_BACKUP_COMP_iSCSI 177
-#define IMAGE_FIRMWARE_BACKUP_FCoE     178
-#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
-#define IMAGE_FIRMWARE_PHY             192
-#define IMAGE_REDBOOT_DIR              208
-#define IMAGE_REDBOOT_CONFIG           209
-#define IMAGE_UFI_DIR                  210
-#define IMAGE_BOOT_CODE                        224
-
 /************* Rx Packet Type Encoding **************/
 #define BE_UNICAST_PACKET              0
 #define BE_MULTICAST_PACKET            1
@@ -281,10 +193,10 @@ struct be_eq_entry {
 /* TX Queue Descriptor */
 #define ETH_WRB_FRAG_LEN_MASK          0xFFFF
 struct be_eth_wrb {
-       u32 frag_pa_hi;         /* dword 0 */
-       u32 frag_pa_lo;         /* dword 1 */
-       u32 rsvd0;              /* dword 2 */
-       u32 frag_len;           /* dword 3: bits 0 - 15 */
+       __le32 frag_pa_hi;              /* dword 0 */
+       __le32 frag_pa_lo;              /* dword 1 */
+       u32 rsvd0;                      /* dword 2 */
+       __le32 frag_len;                /* dword 3: bits 0 - 15 */
 } __packed;
 
 /* Pseudo amap definition for eth_hdr_wrb in which each bit of the
@@ -311,8 +223,13 @@ struct amap_eth_hdr_wrb {
        u8 vlan_tag[16];
 } __packed;
 
+#define TX_HDR_WRB_COMPL               1               /* word 2 */
+#define TX_HDR_WRB_EVT                 BIT(1)          /* word 2 */
+#define TX_HDR_WRB_NUM_SHIFT           13              /* word 2: bits 13:17 */
+#define TX_HDR_WRB_NUM_MASK            0x1F            /* word 2: bits 13:17 */
+
 struct be_eth_hdr_wrb {
-       u32 dw[4];
+       __le32 dw[4];
 };
 
 /********* Tx Compl Status Encoding *********/
@@ -435,138 +352,3 @@ struct amap_eth_rx_compl_v1 {
 struct be_eth_rx_compl {
        u32 dw[4];
 };
-
-struct mgmt_hba_attribs {
-       u8 flashrom_version_string[32];
-       u8 manufacturer_name[32];
-       u32 supported_modes;
-       u32 rsvd0[3];
-       u8 ncsi_ver_string[12];
-       u32 default_extended_timeout;
-       u8 controller_model_number[32];
-       u8 controller_description[64];
-       u8 controller_serial_number[32];
-       u8 ip_version_string[32];
-       u8 firmware_version_string[32];
-       u8 bios_version_string[32];
-       u8 redboot_version_string[32];
-       u8 driver_version_string[32];
-       u8 fw_on_flash_version_string[32];
-       u32 functionalities_supported;
-       u16 max_cdblength;
-       u8 asic_revision;
-       u8 generational_guid[16];
-       u8 hba_port_count;
-       u16 default_link_down_timeout;
-       u8 iscsi_ver_min_max;
-       u8 multifunction_device;
-       u8 cache_valid;
-       u8 hba_status;
-       u8 max_domains_supported;
-       u8 phy_port;
-       u32 firmware_post_status;
-       u32 hba_mtu[8];
-       u32 rsvd1[4];
-};
-
-struct mgmt_controller_attrib {
-       struct mgmt_hba_attribs hba_attribs;
-       u16 pci_vendor_id;
-       u16 pci_device_id;
-       u16 pci_sub_vendor_id;
-       u16 pci_sub_system_id;
-       u8 pci_bus_number;
-       u8 pci_device_number;
-       u8 pci_function_number;
-       u8 interface_type;
-       u64 unique_identifier;
-       u32 rsvd0[5];
-};
-
-struct controller_id {
-       u32 vendor;
-       u32 device;
-       u32 subvendor;
-       u32 subdevice;
-};
-
-struct flash_comp {
-       unsigned long offset;
-       int optype;
-       int size;
-       int img_type;
-};
-
-struct image_hdr {
-       u32 imageid;
-       u32 imageoffset;
-       u32 imagelength;
-       u32 image_checksum;
-       u8 image_version[32];
-};
-struct flash_file_hdr_g2 {
-       u8 sign[32];
-       u32 cksum;
-       u32 antidote;
-       struct controller_id cont_id;
-       u32 file_len;
-       u32 chunk_num;
-       u32 total_chunks;
-       u32 num_imgs;
-       u8 build[24];
-};
-
-struct flash_file_hdr_g3 {
-       u8 sign[52];
-       u8 ufi_version[4];
-       u32 file_len;
-       u32 cksum;
-       u32 antidote;
-       u32 num_imgs;
-       u8 build[24];
-       u8 asic_type_rev;
-       u8 rsvd[31];
-};
-
-struct flash_section_hdr {
-       u32 format_rev;
-       u32 cksum;
-       u32 antidote;
-       u32 num_images;
-       u8 id_string[128];
-       u32 rsvd[4];
-} __packed;
-
-struct flash_section_hdr_g2 {
-       u32 format_rev;
-       u32 cksum;
-       u32 antidote;
-       u32 build_num;
-       u8 id_string[128];
-       u32 rsvd[8];
-} __packed;
-
-struct flash_section_entry {
-       u32 type;
-       u32 offset;
-       u32 pad_size;
-       u32 image_size;
-       u32 cksum;
-       u32 entry_point;
-       u16 optype;
-       u16 rsvd0;
-       u32 rsvd1;
-       u8 ver_data[32];
-} __packed;
-
-struct flash_section_info {
-       u8 cookie[32];
-       struct flash_section_hdr fsec_hdr;
-       struct flash_section_entry fsec_entry[32];
-} __packed;
-
-struct flash_section_info_g2 {
-       u8 cookie[32];
-       struct flash_section_hdr_g2 fsec_hdr;
-       struct flash_section_entry fsec_entry[32];
-} __packed;
index 41a0a5498da74c7b9b1129b68c9173c1b15470a4..932b93a1496592ef6d38820997c74d4e36ddf526 100644 (file)
@@ -662,48 +662,40 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
                netif_carrier_off(netdev);
 }
 
-static void be_tx_stats_update(struct be_tx_obj *txo,
-                              u32 wrb_cnt, u32 copied, u32 gso_segs,
-                              bool stopped)
+static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
        struct be_tx_stats *stats = tx_stats(txo);
 
        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
-       stats->tx_wrbs += wrb_cnt;
-       stats->tx_bytes += copied;
-       stats->tx_pkts += (gso_segs ? gso_segs : 1);
-       if (stopped)
-               stats->tx_stops++;
+       stats->tx_bytes += skb->len;
+       stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
        u64_stats_update_end(&stats->sync);
 }
 
-/* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
-                          bool *dummy)
+/* Returns number of WRBs needed for the skb */
+static u32 skb_wrb_cnt(struct sk_buff *skb)
 {
-       int cnt = (skb->len > skb->data_len);
-
-       cnt += skb_shinfo(skb)->nr_frags;
-
-       /* to account for hdr wrb */
-       cnt++;
-       if (lancer_chip(adapter) || !(cnt & 1)) {
-               *dummy = false;
-       } else {
-               /* add a dummy to make it an even num */
-               cnt++;
-               *dummy = true;
-       }
-       BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
-       return cnt;
+       /* +1 for the header wrb */
+       return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
 }
 
 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
 {
-       wrb->frag_pa_hi = upper_32_bits(addr);
-       wrb->frag_pa_lo = addr & 0xFFFFFFFF;
-       wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
+       wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
+       wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
+       wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
+       wrb->rsvd0 = 0;
+}
+
+/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
+ * to avoid the swap and shift/mask operations in wrb_fill().
+ */
+static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
+{
+       wrb->frag_pa_hi = 0;
+       wrb->frag_pa_lo = 0;
+       wrb->frag_len = 0;
        wrb->rsvd0 = 0;
 }
 
@@ -713,7 +705,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
        u8 vlan_prio;
        u16 vlan_tag;
 
-       vlan_tag = vlan_tx_tag_get(skb);
+       vlan_tag = skb_vlan_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
@@ -764,52 +756,57 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                        SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
        }
 
-       /* To skip HW VLAN tagging: evt = 1, compl = 0 */
-       SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
-       SET_TX_WRB_HDR_BITS(event, hdr, 1);
        SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
        SET_TX_WRB_HDR_BITS(len, hdr, len);
+
+       /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
+        * When this hack is not needed, the evt bit is set while ringing DB
+        */
+       if (skip_hw_vlan)
+               SET_TX_WRB_HDR_BITS(event, hdr, 1);
 }
 
 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
 {
        dma_addr_t dma;
+       u32 frag_len = le32_to_cpu(wrb->frag_len);
 
-       be_dws_le_to_cpu(wrb, sizeof(*wrb));
 
-       dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
-       if (wrb->frag_len) {
+       dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
+               (u64)le32_to_cpu(wrb->frag_pa_lo);
+       if (frag_len) {
                if (unmap_single)
-                       dma_unmap_single(dev, dma, wrb->frag_len,
-                                        DMA_TO_DEVICE);
+                       dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
                else
-                       dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
+                       dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
        }
 }
 
-static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
-                       struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
-                       bool skip_hw_vlan)
+/* Returns the number of WRBs used up by the skb */
+static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
+                          struct sk_buff *skb, bool skip_hw_vlan)
 {
-       dma_addr_t busaddr;
-       int i, copied = 0;
+       u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
        struct device *dev = &adapter->pdev->dev;
-       struct sk_buff *first_skb = skb;
-       struct be_eth_wrb *wrb;
+       struct be_queue_info *txq = &txo->q;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
-       u16 map_head;
+       struct be_eth_wrb *wrb;
+       dma_addr_t busaddr;
+       u16 head = txq->head;
 
        hdr = queue_head_node(txq);
+       wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
+       be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
        queue_head_inc(txq);
-       map_head = txq->head;
 
        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
@@ -820,7 +817,6 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
-               be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }
@@ -834,35 +830,44 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
-               be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }
 
-       if (dummy_wrb) {
-               wrb = queue_head_node(txq);
-               wrb_fill(wrb, 0, 0);
-               be_dws_cpu_to_le(wrb, sizeof(*wrb));
-               queue_head_inc(txq);
-       }
+       BUG_ON(txo->sent_skb_list[head]);
+       txo->sent_skb_list[head] = skb;
+       txo->last_req_hdr = head;
+       atomic_add(wrb_cnt, &txq->used);
+       txo->last_req_wrb_cnt = wrb_cnt;
+       txo->pend_wrb_cnt += wrb_cnt;
 
-       wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
-       be_dws_cpu_to_le(hdr, sizeof(*hdr));
+       be_tx_stats_update(txo, skb);
+       return wrb_cnt;
 
-       return copied;
 dma_err:
-       txq->head = map_head;
+       /* Bring the queue back to the state it was in before this
+        * routine was invoked.
+        */
+       txq->head = head;
+       /* skip the first wrb (hdr); it's not mapped */
+       queue_head_inc(txq);
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
-               copied -= wrb->frag_len;
+               copied -= le32_to_cpu(wrb->frag_len);
                adapter->drv_stats.dma_map_errors++;
                queue_head_inc(txq);
        }
+       txq->head = head;
        return 0;
 }
 
+static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
+{
+       return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
+}
+
 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
@@ -873,7 +878,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
        if (unlikely(!skb))
                return skb;
 
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
 
        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
@@ -932,7 +937,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb)
 
 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
 {
-       return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
+       return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
 }
 
 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
@@ -955,7 +960,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
-           (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
+           (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
@@ -973,7 +978,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
-           vlan_tx_tag_present(skb)) {
+           skb_vlan_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
@@ -1030,52 +1035,64 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
        return skb;
 }
 
+static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
+{
+       struct be_queue_info *txq = &txo->q;
+       struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
+
+       /* Mark the last request eventable if it hasn't been marked already */
+       if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
+               hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
+
+       /* compose a dummy wrb if there are odd set of wrbs to notify */
+       if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
+               wrb_fill_dummy(queue_head_node(txq));
+               queue_head_inc(txq);
+               atomic_inc(&txq->used);
+               txo->pend_wrb_cnt++;
+               hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
+                                          TX_HDR_WRB_NUM_SHIFT);
+               hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
+                                         TX_HDR_WRB_NUM_SHIFT);
+       }
+       be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
+       txo->pend_wrb_cnt = 0;
+}
+
 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
+       bool skip_hw_vlan = false, flush = !skb->xmit_more;
        struct be_adapter *adapter = netdev_priv(netdev);
-       struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+       u16 q_idx = skb_get_queue_mapping(skb);
+       struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
        struct be_queue_info *txq = &txo->q;
-       bool dummy_wrb, stopped = false;
-       u32 wrb_cnt = 0, copied = 0;
-       bool skip_hw_vlan = false;
-       u32 start = txq->head;
+       u16 wrb_cnt;
 
        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
-       if (!skb) {
-               tx_stats(txo)->tx_drv_drops++;
-               return NETDEV_TX_OK;
-       }
+       if (unlikely(!skb))
+               goto drop;
 
-       wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
+       wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
+       if (unlikely(!wrb_cnt)) {
+               dev_kfree_skb_any(skb);
+               goto drop;
+       }
 
-       copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
-                             skip_hw_vlan);
-       if (copied) {
-               int gso_segs = skb_shinfo(skb)->gso_segs;
+       if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
+               netif_stop_subqueue(netdev, q_idx);
+               tx_stats(txo)->tx_stops++;
+       }
 
-               /* record the sent skb in the sent_skb table */
-               BUG_ON(txo->sent_skb_list[start]);
-               txo->sent_skb_list[start] = skb;
+       if (flush || __netif_subqueue_stopped(netdev, q_idx))
+               be_xmit_flush(adapter, txo);
 
-               /* Ensure txq has space for the next skb; Else stop the queue
-                * *BEFORE* ringing the tx doorbell, so that we serialze the
-                * tx compls of the current transmit which'll wake up the queue
-                */
-               atomic_add(wrb_cnt, &txq->used);
-               if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
-                                                               txq->len) {
-                       netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
-                       stopped = true;
-               }
-
-               be_txq_notify(adapter, txo, wrb_cnt);
+       return NETDEV_TX_OK;
+drop:
+       tx_stats(txo)->tx_drv_drops++;
+       /* Flush the already enqueued tx requests */
+       if (flush && txo->pend_wrb_cnt)
+               be_xmit_flush(adapter, txo);
 
-               be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
-       } else {
-               txq->head = start;
-               tx_stats(txo)->tx_drv_drops++;
-               dev_kfree_skb_any(skb);
-       }
        return NETDEV_TX_OK;
 }
 
@@ -1096,6 +1113,43 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
        return 0;
 }
 
+static inline bool be_in_all_promisc(struct be_adapter *adapter)
+{
+       return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
+                       BE_IF_FLAGS_ALL_PROMISCUOUS;
+}
+
+static int be_set_vlan_promisc(struct be_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       int status;
+
+       if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
+               return 0;
+
+       status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
+       if (!status) {
+               dev_info(dev, "Enabled VLAN promiscuous mode\n");
+               adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
+       } else {
+               dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
+       }
+       return status;
+}
+
+static int be_clear_vlan_promisc(struct be_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       int status;
+
+       status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
+       if (!status) {
+               dev_info(dev, "Disabling VLAN promiscuous mode\n");
+               adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+       }
+       return status;
+}
+
 /*
  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
  * If the user configures more, place BE in vlan promiscuous mode.
@@ -1108,11 +1162,11 @@ static int be_vid_config(struct be_adapter *adapter)
        int status = 0;
 
        /* No need to further configure vids if in promiscuous mode */
-       if (adapter->promiscuous)
+       if (be_in_all_promisc(adapter))
                return 0;
 
        if (adapter->vlans_added > be_max_vlans(adapter))
-               goto set_vlan_promisc;
+               return be_set_vlan_promisc(adapter);
 
        /* Construct VLAN Table to give to HW */
        for_each_set_bit(i, adapter->vids, VLAN_N_VID)
@@ -1120,36 +1174,14 @@ static int be_vid_config(struct be_adapter *adapter)
 
        status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
        if (status) {
+               dev_err(dev, "Setting HW VLAN filtering failed\n");
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (addl_status(status) ==
                                MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
-                       goto set_vlan_promisc;
-               dev_err(dev, "Setting HW VLAN filtering failed\n");
-       } else {
-               if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
-                       /* hw VLAN filtering re-enabled. */
-                       status = be_cmd_rx_filter(adapter,
-                                                 BE_FLAGS_VLAN_PROMISC, OFF);
-                       if (!status) {
-                               dev_info(dev,
-                                        "Disabling VLAN Promiscuous mode\n");
-                               adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
-                       }
-               }
+                       return be_set_vlan_promisc(adapter);
+       } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+               status = be_clear_vlan_promisc(adapter);
        }
-
-       return status;
-
-set_vlan_promisc:
-       if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
-               return 0;
-
-       status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
-       if (!status) {
-               dev_info(dev, "Enable VLAN Promiscuous mode\n");
-               adapter->flags |= BE_FLAGS_VLAN_PROMISC;
-       } else
-               dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
        return status;
 }
 
@@ -1191,79 +1223,99 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
        return be_vid_config(adapter);
 }
 
-static void be_clear_promisc(struct be_adapter *adapter)
+static void be_clear_all_promisc(struct be_adapter *adapter)
 {
-       adapter->promiscuous = false;
-       adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
+       be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
+       adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+}
 
-       be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+static void be_set_all_promisc(struct be_adapter *adapter)
+{
+       be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
+       adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
 }
 
-static void be_set_rx_mode(struct net_device *netdev)
+static void be_set_mc_promisc(struct be_adapter *adapter)
 {
-       struct be_adapter *adapter = netdev_priv(netdev);
        int status;
 
-       if (netdev->flags & IFF_PROMISC) {
-               be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
-               adapter->promiscuous = true;
-               goto done;
-       }
+       if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
+               return;
 
-       /* BE was previously in promiscuous mode; disable it */
-       if (adapter->promiscuous) {
-               be_clear_promisc(adapter);
-               if (adapter->vlans_added)
-                       be_vid_config(adapter);
+       status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
+       if (!status)
+               adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
+}
+
+static void be_set_mc_list(struct be_adapter *adapter)
+{
+       int status;
+
+       status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
+       if (!status)
+               adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
+       else
+               be_set_mc_promisc(adapter);
+}
+
+static void be_set_uc_list(struct be_adapter *adapter)
+{
+       struct netdev_hw_addr *ha;
+       int i = 1; /* First slot is claimed by the Primary MAC */
+
+       for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
+               be_cmd_pmac_del(adapter, adapter->if_handle,
+                               adapter->pmac_id[i], 0);
+
+       if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
+               be_set_all_promisc(adapter);
+               return;
        }
 
-       /* Enable multicast promisc if num configured exceeds what we support */
-       if (netdev->flags & IFF_ALLMULTI ||
-           netdev_mc_count(netdev) > be_max_mc(adapter))
-               goto set_mcast_promisc;
+       netdev_for_each_uc_addr(ha, adapter->netdev) {
+               adapter->uc_macs++; /* First slot is for Primary MAC */
+               be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
+                               &adapter->pmac_id[adapter->uc_macs], 0);
+       }
+}
 
-       if (netdev_uc_count(netdev) != adapter->uc_macs) {
-               struct netdev_hw_addr *ha;
-               int i = 1; /* First slot is claimed by the Primary MAC */
+static void be_clear_uc_list(struct be_adapter *adapter)
+{
+       int i;
 
-               for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
-                       be_cmd_pmac_del(adapter, adapter->if_handle,
-                                       adapter->pmac_id[i], 0);
-               }
+       for (i = 1; i < (adapter->uc_macs + 1); i++)
+               be_cmd_pmac_del(adapter, adapter->if_handle,
+                               adapter->pmac_id[i], 0);
+       adapter->uc_macs = 0;
+}
 
-               if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
-                       be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
-                       adapter->promiscuous = true;
-                       goto done;
-               }
+static void be_set_rx_mode(struct net_device *netdev)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
 
-               netdev_for_each_uc_addr(ha, adapter->netdev) {
-                       adapter->uc_macs++; /* First slot is for Primary MAC */
-                       be_cmd_pmac_add(adapter, (u8 *)ha->addr,
-                                       adapter->if_handle,
-                                       &adapter->pmac_id[adapter->uc_macs], 0);
-               }
+       if (netdev->flags & IFF_PROMISC) {
+               be_set_all_promisc(adapter);
+               return;
        }
 
-       status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
-       if (!status) {
-               if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
-                       adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
-               goto done;
+       /* Interface was previously in promiscuous mode; disable it */
+       if (be_in_all_promisc(adapter)) {
+               be_clear_all_promisc(adapter);
+               if (adapter->vlans_added)
+                       be_vid_config(adapter);
        }
 
-set_mcast_promisc:
-       if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+       /* Enable multicast promisc if num configured exceeds what we support */
+       if (netdev->flags & IFF_ALLMULTI ||
+           netdev_mc_count(netdev) > be_max_mc(adapter)) {
+               be_set_mc_promisc(adapter);
                return;
+       }
 
-       /* Set to MCAST promisc mode if setting MULTICAST address fails
-        * or if num configured exceeds what we support
-        */
-       status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
-       if (!status)
-               adapter->flags |= BE_FLAGS_MCAST_PROMISC;
-done:
-       return;
+       if (netdev_uc_count(netdev) != adapter->uc_macs)
+               be_set_uc_list(adapter);
+
+       be_set_mc_list(adapter);
 }
 
 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
@@ -1959,32 +2011,34 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 static u16 be_tx_compl_process(struct be_adapter *adapter,
                               struct be_tx_obj *txo, u16 last_index)
 {
+       struct sk_buff **sent_skbs = txo->sent_skb_list;
        struct be_queue_info *txq = &txo->q;
+       u16 frag_index, num_wrbs = 0;
+       struct sk_buff *skb = NULL;
+       bool unmap_skb_hdr = false;
        struct be_eth_wrb *wrb;
-       struct sk_buff **sent_skbs = txo->sent_skb_list;
-       struct sk_buff *sent_skb;
-       u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
-       bool unmap_skb_hdr = true;
-
-       sent_skb = sent_skbs[txq->tail];
-       BUG_ON(!sent_skb);
-       sent_skbs[txq->tail] = NULL;
-
-       /* skip header wrb */
-       queue_tail_inc(txq);
 
        do {
-               cur_index = txq->tail;
+               if (sent_skbs[txq->tail]) {
+                       /* Free skb from prev req */
+                       if (skb)
+                               dev_consume_skb_any(skb);
+                       skb = sent_skbs[txq->tail];
+                       sent_skbs[txq->tail] = NULL;
+                       queue_tail_inc(txq);  /* skip hdr wrb */
+                       num_wrbs++;
+                       unmap_skb_hdr = true;
+               }
                wrb = queue_tail_node(txq);
+               frag_index = txq->tail;
                unmap_tx_frag(&adapter->pdev->dev, wrb,
-                             (unmap_skb_hdr && skb_headlen(sent_skb)));
+                             (unmap_skb_hdr && skb_headlen(skb)));
                unmap_skb_hdr = false;
-
-               num_wrbs++;
                queue_tail_inc(txq);
-       } while (cur_index != last_index);
+               num_wrbs++;
+       } while (frag_index != last_index);
+       dev_consume_skb_any(skb);
 
-       dev_consume_skb_any(sent_skb);
        return num_wrbs;
 }
 
@@ -2068,12 +2122,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 
 static void be_tx_compl_clean(struct be_adapter *adapter)
 {
+       u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
+       struct device *dev = &adapter->pdev->dev;
        struct be_tx_obj *txo;
        struct be_queue_info *txq;
        struct be_eth_tx_compl *txcp;
-       u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
-       struct sk_buff *sent_skb;
-       bool dummy_wrb;
        int i, pending_txqs;
 
        /* Stop polling for compls when HW has been silent for 10ms */
@@ -2095,7 +2148,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
                                atomic_sub(num_wrbs, &txq->used);
                                timeo = 0;
                        }
-                       if (atomic_read(&txq->used) == 0)
+                       if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
                                pending_txqs--;
                }
 
@@ -2105,21 +2158,29 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
                mdelay(1);
        } while (true);
 
+       /* Free enqueued TX that was never notified to HW */
        for_all_tx_queues(adapter, txo, i) {
                txq = &txo->q;
-               if (atomic_read(&txq->used))
-                       dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
-                               atomic_read(&txq->used));
 
-               /* free posted tx for which compls will never arrive */
-               while (atomic_read(&txq->used)) {
-                       sent_skb = txo->sent_skb_list[txq->tail];
+               if (atomic_read(&txq->used)) {
+                       dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
+                                i, atomic_read(&txq->used));
+                       notified_idx = txq->tail;
                        end_idx = txq->tail;
-                       num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
-                                                  &dummy_wrb);
-                       index_adv(&end_idx, num_wrbs - 1, txq->len);
+                       index_adv(&end_idx, atomic_read(&txq->used) - 1,
+                                 txq->len);
+                       /* Use the tx-compl process logic to handle requests
+                        * that were not sent to the HW.
+                        */
                        num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
                        atomic_sub(num_wrbs, &txq->used);
+                       BUG_ON(atomic_read(&txq->used));
+                       txo->pend_wrb_cnt = 0;
+                       /* Since hw was never notified of these requests,
+                        * reset TXQ indices
+                        */
+                       txq->head = notified_idx;
+                       txq->tail = notified_idx;
                }
        }
 }
@@ -2514,6 +2575,106 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
        }
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+       bool status = true;
+
+       spin_lock(&eqo->lock); /* BH is already disabled */
+       if (eqo->state & BE_EQ_LOCKED) {
+               WARN_ON(eqo->state & BE_EQ_NAPI);
+               eqo->state |= BE_EQ_NAPI_YIELD;
+               status = false;
+       } else {
+               eqo->state = BE_EQ_NAPI;
+       }
+       spin_unlock(&eqo->lock);
+       return status;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+       spin_lock(&eqo->lock); /* BH is already disabled */
+
+       WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
+       eqo->state = BE_EQ_IDLE;
+
+       spin_unlock(&eqo->lock);
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+       bool status = true;
+
+       spin_lock_bh(&eqo->lock);
+       if (eqo->state & BE_EQ_LOCKED) {
+               eqo->state |= BE_EQ_POLL_YIELD;
+               status = false;
+       } else {
+               eqo->state |= BE_EQ_POLL;
+       }
+       spin_unlock_bh(&eqo->lock);
+       return status;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+       spin_lock_bh(&eqo->lock);
+
+       WARN_ON(eqo->state & (BE_EQ_NAPI));
+       eqo->state = BE_EQ_IDLE;
+
+       spin_unlock_bh(&eqo->lock);
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+       spin_lock_init(&eqo->lock);
+       eqo->state = BE_EQ_IDLE;
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+       local_bh_disable();
+
+       /* It's enough to just acquire napi lock on the eqo to stop
+        * be_busy_poll() from processing any queueus.
+        */
+       while (!be_lock_napi(eqo))
+               mdelay(1);
+
+       local_bh_enable();
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline bool be_lock_napi(struct be_eq_obj *eqo)
+{
+       return true;
+}
+
+static inline void be_unlock_napi(struct be_eq_obj *eqo)
+{
+}
+
+static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
+{
+       return false;
+}
+
+static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+
+static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
+{
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 int be_poll(struct napi_struct *napi, int budget)
 {
        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
@@ -2833,11 +2994,7 @@ static int be_close(struct net_device *netdev)
        be_tx_compl_clean(adapter);
 
        be_rx_qs_destroy(adapter);
-
-       for (i = 1; i < (adapter->uc_macs + 1); i++)
-               be_cmd_pmac_del(adapter, adapter->if_handle,
-                               adapter->pmac_id[i], 0);
-       adapter->uc_macs = 0;
+       be_clear_uc_list(adapter);
 
        for_all_evt_queues(adapter, eqo, i) {
                if (msix_enabled(adapter))
@@ -3008,6 +3165,19 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
        return status;
 }
 
+static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
+{
+       u32 addr;
+
+       addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
+
+       mac[5] = (u8)(addr & 0xFF);
+       mac[4] = (u8)((addr >> 8) & 0xFF);
+       mac[3] = (u8)((addr >> 16) & 0xFF);
+       /* Use the OUI from the current MAC address */
+       memcpy(mac, adapter->netdev->dev_addr, 3);
+}
+
 /*
  * Generate a seed MAC address from the PF MAC Address using jhash.
  * MAC Address for VFs are assigned incrementally starting from the seed.
@@ -3108,14 +3278,9 @@ static void be_cancel_worker(struct be_adapter *adapter)
 
 static void be_mac_clear(struct be_adapter *adapter)
 {
-       int i;
-
        if (adapter->pmac_id) {
-               for (i = 0; i < (adapter->uc_macs + 1); i++)
-                       be_cmd_pmac_del(adapter, adapter->if_handle,
-                                       adapter->pmac_id[i], 0);
-               adapter->uc_macs = 0;
-
+               be_cmd_pmac_del(adapter, adapter->if_handle,
+                               adapter->pmac_id[0], 0);
                kfree(adapter->pmac_id);
                adapter->pmac_id = NULL;
        }
@@ -3171,13 +3336,32 @@ static int be_clear(struct be_adapter *adapter)
        return 0;
 }
 
+static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
+                       u32 cap_flags, u32 vf)
+{
+       u32 en_flags;
+       int status;
+
+       en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+                  BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
+                  BE_IF_FLAGS_RSS;
+
+       en_flags &= cap_flags;
+
+       status = be_cmd_if_create(adapter, cap_flags, en_flags,
+                                 if_handle, vf);
+
+       return status;
+}
+
 static int be_vfs_if_create(struct be_adapter *adapter)
 {
        struct be_resources res = {0};
        struct be_vf_cfg *vf_cfg;
-       u32 cap_flags, en_flags, vf;
-       int status = 0;
+       u32 cap_flags, vf;
+       int status;
 
+       /* If a FW profile exists, then cap_flags are updated */
        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                    BE_IF_FLAGS_MULTICAST;
 
@@ -3189,18 +3373,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
                                cap_flags = res.if_cap_flags;
                }
 
-               /* If a FW profile exists, then cap_flags are updated */
-               en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
-                                       BE_IF_FLAGS_BROADCAST |
-                                       BE_IF_FLAGS_MULTICAST);
-               status =
-                   be_cmd_if_create(adapter, cap_flags, en_flags,
-                                    &vf_cfg->if_handle, vf + 1);
+               status = be_if_create(adapter, &vf_cfg->if_handle,
+                                     cap_flags, vf + 1);
                if (status)
-                       goto err;
+                       return status;
        }
-err:
-       return status;
+
+       return 0;
 }
 
 static int be_vf_setup_init(struct be_adapter *adapter)
@@ -3385,7 +3564,7 @@ static void be_setup_init(struct be_adapter *adapter)
        adapter->phy.link_speed = -1;
        adapter->if_handle = -1;
        adapter->be3_native = false;
-       adapter->promiscuous = false;
+       adapter->if_flags = 0;
        if (be_physfn(adapter))
                adapter->cmd_privileges = MAX_PRIVILEGES;
        else
@@ -3512,7 +3691,9 @@ static int be_get_config(struct be_adapter *adapter)
        if (status)
                return status;
 
-        if (be_physfn(adapter)) {
+       be_cmd_query_port_name(adapter);
+
+       if (be_physfn(adapter)) {
                status = be_cmd_get_active_profile(adapter, &profile_id);
                if (!status)
                        dev_info(&adapter->pdev->dev,
@@ -3638,10 +3819,20 @@ int be_update_queues(struct be_adapter *adapter)
        return status;
 }
 
+static inline int fw_major_num(const char *fw_ver)
+{
+       int fw_major = 0, i;
+
+       i = sscanf(fw_ver, "%d.", &fw_major);
+       if (i != 1)
+               return 0;
+
+       return fw_major;
+}
+
 static int be_setup(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
-       u32 tx_fc, rx_fc, en_flags;
        int status;
 
        be_setup_init(adapter);
@@ -3657,13 +3848,8 @@ static int be_setup(struct be_adapter *adapter)
        if (status)
                goto err;
 
-       en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-                  BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
-       if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
-               en_flags |= BE_IF_FLAGS_RSS;
-       en_flags = en_flags & be_if_cap_flags(adapter);
-       status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
-                                 &adapter->if_handle, 0);
+       status = be_if_create(adapter, &adapter->if_handle,
+                             be_if_cap_flags(adapter), 0);
        if (status)
                goto err;
 
@@ -3696,11 +3882,14 @@ static int be_setup(struct be_adapter *adapter)
 
        be_cmd_get_acpi_wol_cap(adapter);
 
-       be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
+       status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
+                                        adapter->rx_fc);
+       if (status)
+               be_cmd_get_flow_control(adapter, &adapter->tx_fc,
+                                       &adapter->rx_fc);
 
-       if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
-               be_cmd_set_flow_control(adapter, adapter->tx_fc,
-                                       adapter->rx_fc);
+       dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
+                adapter->tx_fc, adapter->rx_fc);
 
        if (be_physfn(adapter))
                be_cmd_set_logical_link_config(adapter,
@@ -3739,7 +3928,7 @@ static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
 
 static bool phy_flashing_required(struct be_adapter *adapter)
 {
-       return (adapter->phy.phy_type == TN_8022 &&
+       return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
                adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
 }
 
@@ -3790,7 +3979,8 @@ static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
        int status;
        u8 crc[4];
 
-       status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
+       status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
+                                     img_size - 4);
        if (status)
                return status;
 
@@ -3806,13 +3996,13 @@ static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
 }
 
 static int be_flash(struct be_adapter *adapter, const u8 *img,
-                   struct be_dma_mem *flash_cmd, int optype, int img_size)
+                   struct be_dma_mem *flash_cmd, int optype, int img_size,
+                   u32 img_offset)
 {
+       u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
        struct be_cmd_write_flashrom *req = flash_cmd->va;
-       u32 total_bytes, flash_op, num_bytes;
        int status;
 
-       total_bytes = img_size;
        while (total_bytes) {
                num_bytes = min_t(u32, 32*1024, total_bytes);
 
@@ -3833,12 +4023,15 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
                memcpy(req->data_buf, img, num_bytes);
                img += num_bytes;
                status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
-                                              flash_op, num_bytes);
+                                              flash_op, img_offset +
+                                              bytes_sent, num_bytes);
                if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
                    optype == OPTYPE_PHY_FW)
                        break;
                else if (status)
                        return status;
+
+               bytes_sent += num_bytes;
        }
        return 0;
 }
@@ -3906,6 +4099,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
                pflashcomp = gen2_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g2);
                num_comp = ARRAY_SIZE(gen2_flash_types);
+               img_hdrs_size = 0;
        }
 
        /* Get flash section info*/
@@ -3950,7 +4144,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
                        return -1;
 
                status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
-                                 pflashcomp[i].size);
+                                 pflashcomp[i].size, 0);
                if (status) {
                        dev_err(dev, "Flashing section type 0x%x failed\n",
                                pflashcomp[i].img_type);
@@ -4017,12 +4211,12 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
                            struct be_dma_mem *flash_cmd, int num_of_images)
 {
        int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
+       bool crc_match, old_fw_img, flash_offset_support = true;
        struct device *dev = &adapter->pdev->dev;
        struct flash_section_info *fsec = NULL;
        u32 img_offset, img_size, img_type;
+       u16 img_optype, flash_optype;
        int status, i, filehdr_size;
-       bool crc_match, old_fw_img;
-       u16 img_optype;
        const u8 *p;
 
        filehdr_size = sizeof(struct flash_file_hdr_g3);
@@ -4032,6 +4226,7 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
                return -EINVAL;
        }
 
+retry_flash:
        for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
                img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
                img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
@@ -4041,6 +4236,12 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
 
                if (img_optype == 0xFFFF)
                        continue;
+
+               if (flash_offset_support)
+                       flash_optype = OPTYPE_OFFSET_SPECIFIED;
+               else
+                       flash_optype = img_optype;
+
                /* Don't bother verifying CRC if an old FW image is being
                 * flashed
                 */
@@ -4049,16 +4250,26 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
 
                status = be_check_flash_crc(adapter, fw->data, img_offset,
                                            img_size, filehdr_size +
-                                           img_hdrs_size, img_optype,
+                                           img_hdrs_size, flash_optype,
                                            &crc_match);
-               /* The current FW image on the card does not recognize the new
-                * FLASH op_type. The FW download is partially complete.
-                * Reboot the server now to enable FW image to recognize the
-                * new FLASH op_type. To complete the remaining process,
-                * download the same FW again after the reboot.
-                */
                if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
                    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
+                       /* The current FW image on the card does not support
+                        * OFFSET based flashing. Retry using older mechanism
+                        * of OPTYPE based flashing
+                        */
+                       if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
+                               flash_offset_support = false;
+                               goto retry_flash;
+                       }
+
+                       /* The current FW image on the card does not recognize
+                        * the new FLASH op_type. The FW download is partially
+                        * complete. Reboot the server now to enable FW image
+                        * to recognize the new FLASH op_type. To complete the
+                        * remaining process, download the same FW again after
+                        * the reboot.
+                        */
                        dev_err(dev, "Flash incomplete. Reset the server\n");
                        dev_err(dev, "Download FW image again after reset\n");
                        return -EAGAIN;
@@ -4076,7 +4287,19 @@ flash:
                if (p + img_size > fw->data + fw->size)
                        return -1;
 
-               status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
+               status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
+                                 img_offset);
+
+               /* The current FW image on the card does not support OFFSET
+                * based flashing. Retry using older mechanism of OPTYPE based
+                * flashing
+                */
+               if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
+                   flash_optype == OPTYPE_OFFSET_SPECIFIED) {
+                       flash_offset_support = false;
+                       goto retry_flash;
+               }
+
                /* For old FW images ignore ILLEGAL_FIELD error or errors on
                 * UFI_DIR region
                 */
@@ -4179,98 +4402,105 @@ static int lancer_fw_download(struct be_adapter *adapter,
        return 0;
 }
 
-#define UFI_TYPE2              2
-#define UFI_TYPE3              3
-#define UFI_TYPE3R             10
-#define UFI_TYPE4              4
+#define BE2_UFI                2
+#define BE3_UFI                3
+#define BE3R_UFI       10
+#define SH_UFI         4
+#define SH_P2_UFI      11
+
 static int be_get_ufi_type(struct be_adapter *adapter,
                           struct flash_file_hdr_g3 *fhdr)
 {
-       if (!fhdr)
-               goto be_get_ufi_exit;
+       if (!fhdr) {
+               dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
+               return -1;
+       }
 
-       if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
-               return UFI_TYPE4;
-       else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
-               if (fhdr->asic_type_rev == 0x10)
-                       return UFI_TYPE3R;
-               else
-                       return UFI_TYPE3;
-       } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
-               return UFI_TYPE2;
+       /* First letter of the build version is used to identify
+        * which chip this image file is meant for.
+        */
+       switch (fhdr->build[0]) {
+       case BLD_STR_UFI_TYPE_SH:
+               return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
+                                                               SH_UFI;
+       case BLD_STR_UFI_TYPE_BE3:
+               return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
+                                                               BE3_UFI;
+       case BLD_STR_UFI_TYPE_BE2:
+               return BE2_UFI;
+       default:
+               return -1;
+       }
+}
 
-be_get_ufi_exit:
-       dev_err(&adapter->pdev->dev,
-               "UFI and Interface are not compatible for flashing\n");
-       return -1;
+/* Check if the flash image file is compatible with the adapter that
+ * is being flashed.
+ * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
+ * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
+ */
+static bool be_check_ufi_compatibility(struct be_adapter *adapter,
+                                      struct flash_file_hdr_g3 *fhdr)
+{
+       int ufi_type = be_get_ufi_type(adapter, fhdr);
+
+       switch (ufi_type) {
+       case SH_P2_UFI:
+               return skyhawk_chip(adapter);
+       case SH_UFI:
+               return (skyhawk_chip(adapter) &&
+                       adapter->asic_rev < ASIC_REV_P2);
+       case BE3R_UFI:
+               return BE3_chip(adapter);
+       case BE3_UFI:
+               return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
+       case BE2_UFI:
+               return BE2_chip(adapter);
+       default:
+               return false;
+       }
 }
 
 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
 {
+       struct device *dev = &adapter->pdev->dev;
        struct flash_file_hdr_g3 *fhdr3;
-       struct image_hdr *img_hdr_ptr = NULL;
+       struct image_hdr *img_hdr_ptr;
+       int status = 0, i, num_imgs;
        struct be_dma_mem flash_cmd;
-       const u8 *p;
-       int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
 
-       flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
-       flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
-                                         &flash_cmd.dma, GFP_KERNEL);
-       if (!flash_cmd.va) {
-               status = -ENOMEM;
-               goto be_fw_exit;
+       fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
+       if (!be_check_ufi_compatibility(adapter, fhdr3)) {
+               dev_err(dev, "Flash image is not compatible with adapter\n");
+               return -EINVAL;
        }
 
-       p = fw->data;
-       fhdr3 = (struct flash_file_hdr_g3 *)p;
-
-       ufi_type = be_get_ufi_type(adapter, fhdr3);
+       flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
+       flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+                                         GFP_KERNEL);
+       if (!flash_cmd.va)
+               return -ENOMEM;
 
        num_imgs = le32_to_cpu(fhdr3->num_imgs);
        for (i = 0; i < num_imgs; i++) {
                img_hdr_ptr = (struct image_hdr *)(fw->data +
                                (sizeof(struct flash_file_hdr_g3) +
                                 i * sizeof(struct image_hdr)));
-               if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
-                       switch (ufi_type) {
-                       case UFI_TYPE4:
-                               status = be_flash_skyhawk(adapter, fw,
-                                                         &flash_cmd, num_imgs);
-                               break;
-                       case UFI_TYPE3R:
-                               status = be_flash_BEx(adapter, fw, &flash_cmd,
-                                                     num_imgs);
-                               break;
-                       case UFI_TYPE3:
-                               /* Do not flash this ufi on BE3-R cards */
-                               if (adapter->asic_rev < 0x10)
-                                       status = be_flash_BEx(adapter, fw,
-                                                             &flash_cmd,
-                                                             num_imgs);
-                               else {
-                                       status = -EINVAL;
-                                       dev_err(&adapter->pdev->dev,
-                                               "Can't load BE3 UFI on BE3R\n");
-                               }
-                       }
-               }
-       }
-
-       if (ufi_type == UFI_TYPE2)
-               status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
-       else if (ufi_type == -1)
-               status = -EINVAL;
+               if (!BE2_chip(adapter) &&
+                   le32_to_cpu(img_hdr_ptr->imageid) != 1)
+                       continue;
 
-       dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
-                         flash_cmd.dma);
-       if (status) {
-               dev_err(&adapter->pdev->dev, "Firmware load error\n");
-               goto be_fw_exit;
+               if (skyhawk_chip(adapter))
+                       status = be_flash_skyhawk(adapter, fw, &flash_cmd,
+                                                 num_imgs);
+               else
+                       status = be_flash_BEx(adapter, fw, &flash_cmd,
+                                             num_imgs);
        }
 
-       dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
+       dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
+       if (!status)
+               dev_info(dev, "Firmware flashed successfully\n");
 
-be_fw_exit:
        return status;
 }
 
@@ -4304,7 +4534,8 @@ fw_exit:
        return status;
 }
 
-static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+                                u16 flags)
 {
        struct be_adapter *adapter = netdev_priv(dev);
        struct nlattr *attr, *br_spec;
@@ -4383,8 +4614,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
  * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
  * is expected to work across all types of IP tunnels once exported. Skyhawk
  * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
- * offloads in hw_enc_features only when a VxLAN port is added. Note this only
- * ensures that other tunnels work fine while VxLAN offloads are not enabled.
+ * offloads in hw_enc_features only when a VxLAN port is added. If other (non
+ * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
+ * those other tunnels are unexported on the fly through ndo_features_check().
  *
  * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
  * adds more than one port, disable offloads and don't re-enable them again
@@ -4463,7 +4695,41 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
                                           struct net_device *dev,
                                           netdev_features_t features)
 {
-       return vxlan_features_check(skb, features);
+       struct be_adapter *adapter = netdev_priv(dev);
+       u8 l4_hdr = 0;
+
+       /* The code below restricts offload features for some tunneled packets.
+        * Offload features for normal (non tunnel) packets are unchanged.
+        */
+       if (!skb->encapsulation ||
+           !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
+               return features;
+
+       /* It's an encapsulated packet and VxLAN offloads are enabled. We
+        * should disable tunnel offload features if it's not a VxLAN packet,
+        * as tunnel offloads have been enabled only for VxLAN. This is done to
+        * allow other tunneled traffic like GRE work fine while VxLAN
+        * offloads are configured in Skyhawk-R.
+        */
+       switch (vlan_get_protocol(skb)) {
+       case htons(ETH_P_IP):
+               l4_hdr = ip_hdr(skb)->protocol;
+               break;
+       case htons(ETH_P_IPV6):
+               l4_hdr = ipv6_hdr(skb)->nexthdr;
+               break;
+       default:
+               return features;
+       }
+
+       if (l4_hdr != IPPROTO_UDP ||
+           skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+           skb->inner_protocol != htons(ETH_P_TEB) ||
+           skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+           sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+       return features;
 }
 #endif
 
@@ -4797,6 +5063,20 @@ static void be_func_recovery_task(struct work_struct *work)
                                      msecs_to_jiffies(1000));
 }
 
+static void be_log_sfp_info(struct be_adapter *adapter)
+{
+       int status;
+
+       status = be_cmd_query_sfp_info(adapter);
+       if (!status) {
+               dev_err(&adapter->pdev->dev,
+                       "Unqualified SFP+ detected on %c from %s part no: %s",
+                       adapter->port_name, adapter->phy.vendor_name,
+                       adapter->phy.vendor_pn);
+       }
+       adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+}
+
 static void be_worker(struct work_struct *work)
 {
        struct be_adapter *adapter =
@@ -4835,6 +5115,9 @@ static void be_worker(struct work_struct *work)
 
        be_eqd_update(adapter);
 
+       if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
+               be_log_sfp_info(adapter);
+
 reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -4881,12 +5164,31 @@ static inline char *func_name(struct be_adapter *adapter)
        return be_physfn(adapter) ? "PF" : "VF";
 }
 
+static inline char *nic_name(struct pci_dev *pdev)
+{
+       switch (pdev->device) {
+       case OC_DEVICE_ID1:
+               return OC_NAME;
+       case OC_DEVICE_ID2:
+               return OC_NAME_BE;
+       case OC_DEVICE_ID3:
+       case OC_DEVICE_ID4:
+               return OC_NAME_LANCER;
+       case BE_DEVICE_ID2:
+               return BE3_NAME;
+       case OC_DEVICE_ID5:
+       case OC_DEVICE_ID6:
+               return OC_NAME_SH;
+       default:
+               return BE_NAME;
+       }
+}
+
 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 {
-       int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;
-       char port_name;
+       int status = 0;
 
        dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
 
@@ -4980,10 +5282,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
 
-       be_cmd_query_port_name(adapter, &port_name);
-
        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
-                func_name(adapter), mc_name(adapter), port_name);
+                func_name(adapter), mc_name(adapter), adapter->port_name);
 
        return 0;
 
@@ -5048,6 +5348,10 @@ static int be_resume(struct pci_dev *pdev)
        if (status)
                return status;
 
+       status = be_cmd_reset_function(adapter);
+       if (status)
+               return status;
+
        be_intr_set(adapter, true);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
index 270308315d437e8ace4dc8da93af7789b7a499f9..ba84c4a9ce32fd2d677b42e49531aab5a0433d97 100644 (file)
@@ -69,7 +69,8 @@ config FSL_XGMAC_MDIO
        select PHYLIB
        select OF_MDIO
        ---help---
-         This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+         This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
+         on the FMan mEMAC (which supports both Clauses 22 and 45)
 
 config UCC_GETH
        tristate "Freescale QE Gigabit Ethernet"
index d77a96fdf1dd332f49a80bbad9bede686c3acfa5..a86af8a7485dad1be3caf4a55b6d77c7c7b5c884 100644 (file)
@@ -426,6 +426,8 @@ struct bufdesc_ex {
  * (40ns * 6).
  */
 #define FEC_QUIRK_BUG_CAPTURE          (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO          (1 << 11)
 
 struct fec_enet_priv_tx_q {
        int index;
index 49cd358c30fa68281c274394ccd4e341f44f5c59..9bb6220663b21a505f2332f028ce3a1e13b77f86 100644 (file)
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx28-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+                               FEC_QUIRK_SINGLE_MDIO,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1188,12 +1189,13 @@ static void
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
        struct  fec_enet_private *fep;
-       struct bufdesc *bdp;
+       struct bufdesc *bdp, *bdp_t;
        unsigned short status;
        struct  sk_buff *skb;
        struct fec_enet_priv_tx_q *txq;
        struct netdev_queue *nq;
        int     index = 0;
+       int     i, bdnum;
        int     entries_free;
 
        fep = netdev_priv(ndev);
@@ -1214,18 +1216,29 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                if (bdp == txq->cur_tx)
                        break;
 
-               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
-
+               bdp_t = bdp;
+               bdnum = 1;
+               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
                skb = txq->tx_skbuff[index];
-               txq->tx_skbuff[index] = NULL;
-               if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-                                       bdp->cbd_datlen, DMA_TO_DEVICE);
-               bdp->cbd_bufaddr = 0;
-               if (!skb) {
-                       bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
-                       continue;
+               while (!skb) {
+                       bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
+                       index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
+                       skb = txq->tx_skbuff[index];
+                       bdnum++;
                }
+               if (skb_shinfo(skb)->nr_frags &&
+                   (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
+                       break;
+
+               for (i = 0; i < bdnum; i++) {
+                       if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
+                               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+                                                bdp->cbd_datlen, DMA_TO_DEVICE);
+                       bdp->cbd_bufaddr = 0;
+                       if (i < bdnum - 1)
+                               bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+               }
+               txq->tx_skbuff[index] = NULL;
 
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1956,7 +1969,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        int err = -ENXIO, i;
 
        /*
-        * The dual fec interfaces are not equivalent with enet-mac.
+        * The i.MX28 dual fec interfaces are not equal.
         * Here are the differences:
         *
         *  - fec0 supports MII & RMII modes while fec1 only supports RMII
@@ -1971,7 +1984,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * mdio interface in board design, and need to be configured by
         * fec0 mii_bus.
         */
-       if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+       if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
                /* fec1 uses fec0 mii_bus */
                if (mii_cnt && fec0_mii_bus) {
                        fep->mii_bus = fec0_mii_bus;
@@ -2034,7 +2047,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        mii_cnt++;
 
        /* save fec0 mii_bus */
-       if (fep->quirks & FEC_QUIRK_ENET_MAC)
+       if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
                fec0_mii_bus = fep->mii_bus;
 
        return 0;
@@ -2583,12 +2596,9 @@ static void fec_enet_free_queue(struct net_device *ndev)
                }
 
        for (i = 0; i < fep->num_rx_queues; i++)
-               if (fep->rx_queue[i])
-                       kfree(fep->rx_queue[i]);
-
+               kfree(fep->rx_queue[i]);
        for (i = 0; i < fep->num_tx_queues; i++)
-               if (fep->tx_queue[i])
-                       kfree(fep->tx_queue[i]);
+               kfree(fep->tx_queue[i]);
 }
 
 static int fec_enet_alloc_queue(struct net_device *ndev)
@@ -3191,6 +3201,7 @@ fec_probe(struct platform_device *pdev)
                pdev->id_entry = of_id->data;
        fep->quirks = pdev->id_entry->driver_data;
 
+       fep->netdev = ndev;
        fep->num_rx_queues = num_rx_qs;
        fep->num_tx_queues = num_tx_qs;
 
index 9e2bcb8079236d4e579de07fc43811bfc2801897..a17628769a1f0de4c749ac59138d300b8e02e07f 100644 (file)
@@ -278,14 +278,20 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
                        fep->stats.collisions++;
 
                /* unmap */
-               dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-                               skb->len, DMA_TO_DEVICE);
+               if (fep->mapped_as_page[dirtyidx])
+                       dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
+                                      CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+                                        CBDR_DATLEN(bdp), DMA_TO_DEVICE);
 
                /*
                 * Free the sk buffer associated with this last transmit.
                 */
-               dev_kfree_skb(skb);
-               fep->tx_skbuff[dirtyidx] = NULL;
+               if (skb) {
+                       dev_kfree_skb(skb);
+                       fep->tx_skbuff[dirtyidx] = NULL;
+               }
 
                /*
                 * Update pointer to next buffer descriptor to be transmitted.
@@ -299,7 +305,7 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
                 * Since we have freed up a buffer, the ring is no longer
                 * full.
                 */
-               if (!fep->tx_free++)
+               if (++fep->tx_free >= MAX_SKB_FRAGS)
                        do_wake = 1;
                has_tx_work = 1;
        }
@@ -509,6 +515,9 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        cbd_t __iomem *bdp;
        int curidx;
        u16 sc;
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+       skb_frag_t *frag;
+       int len;
 
 #ifdef CONFIG_FS_ENET_MPC5121_FEC
        if (((unsigned long)skb->data) & 0x3) {
@@ -530,7 +539,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        bdp = fep->cur_tx;
 
-       if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+       if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
                netif_stop_queue(dev);
                spin_unlock(&fep->tx_lock);
 
@@ -543,35 +552,42 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        curidx = bdp - fep->tx_bd_base;
-       /*
-        * Clear all of the status flags.
-        */
-       CBDC_SC(bdp, BD_ENET_TX_STATS);
-
-       /*
-        * Save skb pointer.
-        */
-       fep->tx_skbuff[curidx] = skb;
-
-       fep->stats.tx_bytes += skb->len;
 
+       len = skb->len;
+       fep->stats.tx_bytes += len;
+       if (nr_frags)
+               len -= skb->data_len;
+       fep->tx_free -= nr_frags + 1;
        /*
         * Push the data cache so the CPM does not get stale memory data.
         */
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
-                               skb->data, skb->len, DMA_TO_DEVICE));
-       CBDW_DATLEN(bdp, skb->len);
+                               skb->data, len, DMA_TO_DEVICE));
+       CBDW_DATLEN(bdp, len);
+
+       fep->mapped_as_page[curidx] = 0;
+       frag = skb_shinfo(skb)->frags;
+       while (nr_frags) {
+               CBDC_SC(bdp,
+                       BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+               CBDS_SC(bdp, BD_ENET_TX_READY);
+
+               if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+                       bdp++, curidx++;
+               else
+                       bdp = fep->tx_bd_base, curidx = 0;
 
-       /*
-        * If this was the last BD in the ring, start at the beginning again.
-        */
-       if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
-               fep->cur_tx++;
-       else
-               fep->cur_tx = fep->tx_bd_base;
+               len = skb_frag_size(frag);
+               CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
+                                                  DMA_TO_DEVICE));
+               CBDW_DATLEN(bdp, len);
 
-       if (!--fep->tx_free)
-               netif_stop_queue(dev);
+               fep->tx_skbuff[curidx] = NULL;
+               fep->mapped_as_page[curidx] = 1;
+
+               frag++;
+               nr_frags--;
+       }
 
        /* Trigger transmission start */
        sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
@@ -582,8 +598,22 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * yay for hw reuse :) */
        if (skb->len <= 60)
                sc |= BD_ENET_TX_PAD;
+       CBDC_SC(bdp, BD_ENET_TX_STATS);
        CBDS_SC(bdp, sc);
 
+       /* Save skb pointer. */
+       fep->tx_skbuff[curidx] = skb;
+
+       /* If this was the last BD in the ring, start at the beginning again. */
+       if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+               bdp++;
+       else
+               bdp = fep->tx_bd_base;
+       fep->cur_tx = bdp;
+
+       if (fep->tx_free < MAX_SKB_FRAGS)
+               netif_stop_queue(dev);
+
        skb_tx_timestamp(skb);
 
        (*fep->ops->tx_kickstart)(dev);
@@ -917,7 +947,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
        }
 
        fpi->rx_ring = 32;
-       fpi->tx_ring = 32;
+       fpi->tx_ring = 64;
        fpi->rx_copybreak = 240;
        fpi->napi_weight = 17;
        fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -955,7 +985,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        privsize = sizeof(*fep) +
                   sizeof(struct sk_buff **) *
-                  (fpi->rx_ring + fpi->tx_ring);
+                    (fpi->rx_ring + fpi->tx_ring) +
+                  sizeof(char) * fpi->tx_ring;
 
        ndev = alloc_etherdev(privsize);
        if (!ndev) {
@@ -978,6 +1009,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        fep->rx_skbuff = (struct sk_buff **)&fep[1];
        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+       fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
+                                      fpi->tx_ring);
 
        spin_lock_init(&fep->lock);
        spin_lock_init(&fep->tx_lock);
@@ -1007,6 +1040,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        netif_carrier_off(ndev);
 
+       ndev->features |= NETIF_F_SG;
+
        ret = register_netdev(ndev);
        if (ret)
                goto out_free_bd;
index 3a4b49e0e717b980e09c54d4cf98f42b56b56e53..f184d8f952e21d269e33179ea1abb1e36b16dda2 100644 (file)
@@ -134,6 +134,7 @@ struct fs_enet_private {
        void __iomem *ring_base;
        struct sk_buff **rx_skbuff;
        struct sk_buff **tx_skbuff;
+       char *mapped_as_page;
        cbd_t __iomem *rx_bd_base;      /* Address of Rx and Tx buffers.    */
        cbd_t __iomem *tx_bd_base;
        cbd_t __iomem *dirty_tx;        /* ring entries to be free()ed.     */
index e54b1e39f9b4fd28e967ceac85f9a460da11da16..43df78882e484e065706bd04c322fa8276d4c424 100644 (file)
@@ -764,7 +764,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
        u32 *tx_queues, *rx_queues;
        unsigned short mode, poll_mode;
 
-       if (!np || !of_device_is_available(np))
+       if (!np)
                return -ENODEV;
 
        if (of_device_is_compatible(np, "fsl,etsec2")) {
@@ -2170,7 +2170,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
 void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
 {
        fcb->flags |= TXFCB_VLN;
-       fcb->vlctl = vlan_tx_tag_get(skb);
+       fcb->vlctl = skb_vlan_tag_get(skb);
 }
 
 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2230,7 +2230,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        regs = tx_queue->grp->regs;
 
        do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
-       do_vlan = vlan_tx_tag_present(skb);
+       do_vlan = skb_vlan_tag_present(skb);
        do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
                    priv->hwts_tx_en;
 
index 3e1a9c1a67a95ffdaddbcca9739a9e37dee66eac..fda12fb32ec77a8538a0f1d1370d2e653c91856c 100644 (file)
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv,
                return -EBUSY;
 
        /* Fill regular entries */
-       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
             i++)
                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
        /* Fill the rest with fall-troughs */
index a35244586a631684fc6eb4b33fd8ec7f3935c149..3a83bc2c613ce0e907831a264070970acfd8eccf 100644 (file)
@@ -32,18 +32,19 @@ struct tgec_mdio_controller {
        __be32  mdio_addr;      /* MDIO address */
 } __packed;
 
+#define MDIO_STAT_ENC          BIT(6)
 #define MDIO_STAT_CLKDIV(x)    (((x>>1) & 0xff) << 8)
-#define MDIO_STAT_BSY          (1 << 0)
-#define MDIO_STAT_RD_ER                (1 << 1)
+#define MDIO_STAT_BSY          BIT(0)
+#define MDIO_STAT_RD_ER                BIT(1)
 #define MDIO_CTL_DEV_ADDR(x)   (x & 0x1f)
 #define MDIO_CTL_PORT_ADDR(x)  ((x & 0x1f) << 5)
-#define MDIO_CTL_PRE_DIS       (1 << 10)
-#define MDIO_CTL_SCAN_EN       (1 << 11)
-#define MDIO_CTL_POST_INC      (1 << 14)
-#define MDIO_CTL_READ          (1 << 15)
+#define MDIO_CTL_PRE_DIS       BIT(10)
+#define MDIO_CTL_SCAN_EN       BIT(11)
+#define MDIO_CTL_POST_INC      BIT(14)
+#define MDIO_CTL_READ          BIT(15)
 
 #define MDIO_DATA(x)           (x & 0xffff)
-#define MDIO_DATA_BSY          (1 << 31)
+#define MDIO_DATA_BSY          BIT(31)
 
 /*
  * Wait until the MDIO bus is free
@@ -51,12 +52,16 @@ struct tgec_mdio_controller {
 static int xgmac_wait_until_free(struct device *dev,
                                 struct tgec_mdio_controller __iomem *regs)
 {
-       uint32_t status;
+       unsigned int timeout;
 
        /* Wait till the bus is free */
-       status = spin_event_timeout(
-               !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
-       if (!status) {
+       timeout = TIMEOUT;
+       while ((ioread32be(&regs->mdio_stat) & MDIO_STAT_BSY) && timeout) {
+               cpu_relax();
+               timeout--;
+       }
+
+       if (!timeout) {
                dev_err(dev, "timeout waiting for bus to be free\n");
                return -ETIMEDOUT;
        }
@@ -70,12 +75,16 @@ static int xgmac_wait_until_free(struct device *dev,
 static int xgmac_wait_until_done(struct device *dev,
                                 struct tgec_mdio_controller __iomem *regs)
 {
-       uint32_t status;
+       unsigned int timeout;
 
        /* Wait till the MDIO write is complete */
-       status = spin_event_timeout(
-               !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
-       if (!status) {
+       timeout = TIMEOUT;
+       while ((ioread32be(&regs->mdio_data) & MDIO_DATA_BSY) && timeout) {
+               cpu_relax();
+               timeout--;
+       }
+
+       if (!timeout) {
                dev_err(dev, "timeout waiting for operation to complete\n");
                return -ETIMEDOUT;
        }
@@ -91,22 +100,42 @@ static int xgmac_wait_until_done(struct device *dev,
 static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
 {
        struct tgec_mdio_controller __iomem *regs = bus->priv;
-       uint16_t dev_addr = regnum >> 16;
+       uint16_t dev_addr;
+       u32 mdio_ctl, mdio_stat;
        int ret;
 
-       /* Set the port and dev addr */
-       out_be32(&regs->mdio_ctl,
-                MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+       mdio_stat = ioread32be(&regs->mdio_stat);
+       if (regnum & MII_ADDR_C45) {
+               /* Clause 45 (ie 10G) */
+               dev_addr = (regnum >> 16) & 0x1f;
+               mdio_stat |= MDIO_STAT_ENC;
+       } else {
+               /* Clause 22 (ie 1G) */
+               dev_addr = regnum & 0x1f;
+               mdio_stat &= ~MDIO_STAT_ENC;
+       }
 
-       /* Set the register address */
-       out_be32(&regs->mdio_addr, regnum & 0xffff);
+       iowrite32be(mdio_stat, &regs->mdio_stat);
 
        ret = xgmac_wait_until_free(&bus->dev, regs);
        if (ret)
                return ret;
 
+       /* Set the port and dev addr */
+       mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+       iowrite32be(mdio_ctl, &regs->mdio_ctl);
+
+       /* Set the register address */
+       if (regnum & MII_ADDR_C45) {
+               iowrite32be(regnum & 0xffff, &regs->mdio_addr);
+
+               ret = xgmac_wait_until_free(&bus->dev, regs);
+               if (ret)
+                       return ret;
+       }
+
        /* Write the value to the register */
-       out_be32(&regs->mdio_data, MDIO_DATA(value));
+       iowrite32be(MDIO_DATA(value), &regs->mdio_data);
 
        ret = xgmac_wait_until_done(&bus->dev, regs);
        if (ret)
@@ -123,38 +152,56 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
 static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
 {
        struct tgec_mdio_controller __iomem *regs = bus->priv;
-       uint16_t dev_addr = regnum >> 16;
+       uint16_t dev_addr;
+       uint32_t mdio_stat;
        uint32_t mdio_ctl;
        uint16_t value;
        int ret;
 
-       /* Set the Port and Device Addrs */
-       mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
-       out_be32(&regs->mdio_ctl, mdio_ctl);
+       mdio_stat = ioread32be(&regs->mdio_stat);
+       if (regnum & MII_ADDR_C45) {
+               dev_addr = (regnum >> 16) & 0x1f;
+               mdio_stat |= MDIO_STAT_ENC;
+       } else {
+               dev_addr = regnum & 0x1f;
+               mdio_stat &= ~MDIO_STAT_ENC;
+       }
 
-       /* Set the register address */
-       out_be32(&regs->mdio_addr, regnum & 0xffff);
+       iowrite32be(mdio_stat, &regs->mdio_stat);
 
        ret = xgmac_wait_until_free(&bus->dev, regs);
        if (ret)
                return ret;
 
+       /* Set the Port and Device Addrs */
+       mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+       iowrite32be(mdio_ctl, &regs->mdio_ctl);
+
+       /* Set the register address */
+       if (regnum & MII_ADDR_C45) {
+               iowrite32be(regnum & 0xffff, &regs->mdio_addr);
+
+               ret = xgmac_wait_until_free(&bus->dev, regs);
+               if (ret)
+                       return ret;
+       }
+
        /* Initiate the read */
-       out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+       iowrite32be(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl);
 
        ret = xgmac_wait_until_done(&bus->dev, regs);
        if (ret)
                return ret;
 
        /* Return all Fs if nothing was there */
-       if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+       if (ioread32be(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
                dev_err(&bus->dev,
                        "Error while reading PHY%d reg at %d.%hhu\n",
                        phy_id, dev_addr, regnum);
                return 0xffff;
        }
 
-       value = in_be32(&regs->mdio_data) & 0xffff;
+       value = ioread32be(&regs->mdio_data) & 0xffff;
        dev_dbg(&bus->dev, "read %04x\n", value);
 
        return value;
@@ -224,6 +271,9 @@ static struct of_device_id xgmac_mdio_match[] = {
        {
                .compatible = "fsl,fman-xmdio",
        },
+       {
+               .compatible = "fsl,fman-memac-mdio",
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
index e9421731b05e729af91f7c1b127de9e2bad0c070..a54d89791311465d89f6b1b59bc2b4f3fdb358d4 100644 (file)
@@ -24,4 +24,13 @@ config HIX5HD2_GMAC
        help
          This selects the hix5hd2 mac family network device.
 
+config HIP04_ETH
+       tristate "HISILICON P04 Ethernet support"
+       select PHYLIB
+       select MARVELL_PHY
+       select MFD_SYSCON
+       ---help---
+         If you wish to compile a kernel for a hardware with hisilicon p04 SoC and
+         want to use the internal ethernet then you should answer Y to this.
+
 endif # NET_VENDOR_HISILICON
index 9175e84622d4b06a9604196ac1a962a8ae7ed9f9..6c14540a4dc5e18c6049aaa24e6e8184af99ddd7 100644 (file)
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
+obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
new file mode 100644 (file)
index 0000000..b72d238
--- /dev/null
@@ -0,0 +1,971 @@
+
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define PPE_CFG_RX_ADDR                        0x100
+#define PPE_CFG_POOL_GRP               0x300
+#define PPE_CFG_RX_BUF_SIZE            0x400
+#define PPE_CFG_RX_FIFO_SIZE           0x500
+#define PPE_CURR_BUF_CNT               0xa200
+
+#define GE_DUPLEX_TYPE                 0x08
+#define GE_MAX_FRM_SIZE_REG            0x3c
+#define GE_PORT_MODE                   0x40
+#define GE_PORT_EN                     0x44
+#define GE_SHORT_RUNTS_THR_REG         0x50
+#define GE_TX_LOCAL_PAGE_REG           0x5c
+#define GE_TRANSMIT_CONTROL_REG                0x60
+#define GE_CF_CRC_STRIP_REG            0x1b0
+#define GE_MODE_CHANGE_REG             0x1b4
+#define GE_RECV_CONTROL_REG            0x1e0
+#define GE_STATION_MAC_ADDRESS         0x210
+#define PPE_CFG_CPU_ADD_ADDR           0x580
+#define PPE_CFG_MAX_FRAME_LEN_REG      0x408
+#define PPE_CFG_BUS_CTRL_REG           0x424
+#define PPE_CFG_RX_CTRL_REG            0x428
+#define PPE_CFG_RX_PKT_MODE_REG                0x438
+#define PPE_CFG_QOS_VMID_GEN           0x500
+#define PPE_CFG_RX_PKT_INT             0x538
+#define PPE_INTEN                      0x600
+#define PPE_INTSTS                     0x608
+#define PPE_RINT                       0x604
+#define PPE_CFG_STS_MODE               0x700
+#define PPE_HIS_RX_PKT_CNT             0x804
+
+/* REG_INTERRUPT */
+#define RCV_INT                                BIT(10)
+#define RCV_NOBUF                      BIT(8)
+#define RCV_DROP                       BIT(7)
+#define TX_DROP                                BIT(6)
+#define DEF_INT_ERR                    (RCV_NOBUF | RCV_DROP | TX_DROP)
+#define DEF_INT_MASK                   (RCV_INT | DEF_INT_ERR)
+
+/* TX descriptor config */
+#define TX_FREE_MEM                    BIT(0)
+#define TX_READ_ALLOC_L3               BIT(1)
+#define TX_FINISH_CACHE_INV            BIT(2)
+#define TX_CLEAR_WB                    BIT(4)
+#define TX_L3_CHECKSUM                 BIT(5)
+#define TX_LOOP_BACK                   BIT(11)
+
+/* RX error */
+#define RX_PKT_DROP                    BIT(0)
+#define RX_L2_ERR                      BIT(1)
+#define RX_PKT_ERR                     (RX_PKT_DROP | RX_L2_ERR)
+
+#define SGMII_SPEED_1000               0x08
+#define SGMII_SPEED_100                        0x07
+#define SGMII_SPEED_10                 0x06
+#define MII_SPEED_100                  0x01
+#define MII_SPEED_10                   0x00
+
+#define GE_DUPLEX_FULL                 BIT(0)
+#define GE_DUPLEX_HALF                 0x00
+#define GE_MODE_CHANGE_EN              BIT(0)
+
+#define GE_TX_AUTO_NEG                 BIT(5)
+#define GE_TX_ADD_CRC                  BIT(6)
+#define GE_TX_SHORT_PAD_THROUGH                BIT(7)
+
+#define GE_RX_STRIP_CRC                        BIT(0)
+#define GE_RX_STRIP_PAD                        BIT(3)
+#define GE_RX_PAD_EN                   BIT(4)
+
+#define GE_AUTO_NEG_CTL                        BIT(0)
+
+#define GE_RX_INT_THRESHOLD            BIT(6)
+#define GE_RX_TIMEOUT                  0x04
+
+#define GE_RX_PORT_EN                  BIT(1)
+#define GE_TX_PORT_EN                  BIT(2)
+
+#define PPE_CFG_STS_RX_PKT_CNT_RC      BIT(12)
+
+#define PPE_CFG_RX_PKT_ALIGN           BIT(18)
+#define PPE_CFG_QOS_VMID_MODE          BIT(14)
+#define PPE_CFG_QOS_VMID_GRP_SHIFT     8
+
+#define PPE_CFG_RX_FIFO_FSFU           BIT(11)
+#define PPE_CFG_RX_DEPTH_SHIFT         16
+#define PPE_CFG_RX_START_SHIFT         0
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT    11
+
+#define PPE_CFG_BUS_LOCAL_REL          BIT(14)
+#define PPE_CFG_BUS_BIG_ENDIEN         BIT(0)
+
+#define RX_DESC_NUM                    128
+#define TX_DESC_NUM                    256
+#define TX_NEXT(N)                     (((N) + 1) & (TX_DESC_NUM-1))
+#define RX_NEXT(N)                     (((N) + 1) & (RX_DESC_NUM-1))
+
+#define GMAC_PPE_RX_PKT_MAX_LEN                379
+#define GMAC_MAX_PKT_LEN               1516
+#define GMAC_MIN_PKT_LEN               31
+#define RX_BUF_SIZE                    1600
+#define RESET_TIMEOUT                  1000
+#define TX_TIMEOUT                     (6 * HZ)
+
+#define DRV_NAME                       "hip04-ether"
+#define DRV_VERSION                    "v1.0"
+
+#define HIP04_MAX_TX_COALESCE_USECS    200
+#define HIP04_MIN_TX_COALESCE_USECS    100
+#define HIP04_MAX_TX_COALESCE_FRAMES   200
+#define HIP04_MIN_TX_COALESCE_FRAMES   100
+
+struct tx_desc {
+       u32 send_addr;
+       u32 send_size;
+       u32 next_addr;
+       u32 cfg;
+       u32 wb_addr;
+} __aligned(64);
+
+struct rx_desc {
+       u16 reserved_16;
+       u16 pkt_len;
+       u32 reserve1[3];
+       u32 pkt_err;
+       u32 reserve2[4];
+};
+
+struct hip04_priv {
+       void __iomem *base;
+       int phy_mode;
+       int chan;
+       unsigned int port;
+       unsigned int speed;
+       unsigned int duplex;
+       unsigned int reg_inten;
+
+       struct napi_struct napi;
+       struct net_device *ndev;
+
+       struct tx_desc *tx_desc;
+       dma_addr_t tx_desc_dma;
+       struct sk_buff *tx_skb[TX_DESC_NUM];
+       dma_addr_t tx_phys[TX_DESC_NUM];
+       unsigned int tx_head;
+
+       int tx_coalesce_frames;
+       int tx_coalesce_usecs;
+       struct hrtimer tx_coalesce_timer;
+
+       unsigned char *rx_buf[RX_DESC_NUM];
+       dma_addr_t rx_phys[RX_DESC_NUM];
+       unsigned int rx_head;
+       unsigned int rx_buf_size;
+
+       struct device_node *phy_node;
+       struct phy_device *phy;
+       struct regmap *map;
+       struct work_struct tx_timeout_task;
+
+       /* written only by tx cleanup */
+       unsigned int tx_tail ____cacheline_aligned_in_smp;
+};
+
+static inline unsigned int tx_count(unsigned int head, unsigned int tail)
+{
+       return (head - tail) % (TX_DESC_NUM - 1);
+}
+
+static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       u32 val;
+
+       priv->speed = speed;
+       priv->duplex = duplex;
+
+       switch (priv->phy_mode) {
+       case PHY_INTERFACE_MODE_SGMII:
+               if (speed == SPEED_1000)
+                       val = SGMII_SPEED_1000;
+               else if (speed == SPEED_100)
+                       val = SGMII_SPEED_100;
+               else
+                       val = SGMII_SPEED_10;
+               break;
+       case PHY_INTERFACE_MODE_MII:
+               if (speed == SPEED_100)
+                       val = MII_SPEED_100;
+               else
+                       val = MII_SPEED_10;
+               break;
+       default:
+               netdev_warn(ndev, "not supported mode\n");
+               val = MII_SPEED_10;
+               break;
+       }
+       writel_relaxed(val, priv->base + GE_PORT_MODE);
+
+       val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
+       writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
+
+       val = GE_MODE_CHANGE_EN;
+       writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
+}
+
+static void hip04_reset_ppe(struct hip04_priv *priv)
+{
+       u32 val, tmp, timeout = 0;
+
+       do {
+               regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
+               regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
+               if (timeout++ > RESET_TIMEOUT)
+                       break;
+       } while (val & 0xfff);
+}
+
+static void hip04_config_fifo(struct hip04_priv *priv)
+{
+       u32 val;
+
+       val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
+       val |= PPE_CFG_STS_RX_PKT_CNT_RC;
+       writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
+
+       val = BIT(priv->port);
+       regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
+
+       val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
+       val |= PPE_CFG_QOS_VMID_MODE;
+       writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
+
+       val = RX_BUF_SIZE;
+       regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
+
+       val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
+       val |= PPE_CFG_RX_FIFO_FSFU;
+       val |= priv->chan << PPE_CFG_RX_START_SHIFT;
+       regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
+
+       val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
+       writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);
+
+       val = PPE_CFG_RX_PKT_ALIGN;
+       writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);
+
+       val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
+       writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);
+
+       val = GMAC_PPE_RX_PKT_MAX_LEN;
+       writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);
+
+       val = GMAC_MAX_PKT_LEN;
+       writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);
+
+       val = GMAC_MIN_PKT_LEN;
+       writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);
+
+       val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
+       val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
+       writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);
+
+       val = GE_RX_STRIP_CRC;
+       writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);
+
+       val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
+       val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
+       writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
+
+       val = GE_AUTO_NEG_CTL;
+       writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
+}
+
+static void hip04_mac_enable(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       u32 val;
+
+       /* enable tx & rx */
+       val = readl_relaxed(priv->base + GE_PORT_EN);
+       val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
+       writel_relaxed(val, priv->base + GE_PORT_EN);
+
+       /* clear rx int */
+       val = RCV_INT;
+       writel_relaxed(val, priv->base + PPE_RINT);
+
+       /* config recv int */
+       val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
+       writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
+
+       /* enable interrupt */
+       priv->reg_inten = DEF_INT_MASK;
+       writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+}
+
+static void hip04_mac_disable(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       u32 val;
+
+       /* disable int */
+       priv->reg_inten &= ~(DEF_INT_MASK);
+       writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+
+       /* disable tx & rx */
+       val = readl_relaxed(priv->base + GE_PORT_EN);
+       val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
+       writel_relaxed(val, priv->base + GE_PORT_EN);
+}
+
+static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+       writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
+}
+
+static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+       regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
+}
+
+static u32 hip04_recv_cnt(struct hip04_priv *priv)
+{
+       return readl(priv->base + PPE_HIS_RX_PKT_CNT);
+}
+
+static void hip04_update_mac_address(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+
+       writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
+                      priv->base + GE_STATION_MAC_ADDRESS);
+       writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
+                       (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
+                      priv->base + GE_STATION_MAC_ADDRESS + 4);
+}
+
+static int hip04_set_mac_address(struct net_device *ndev, void *addr)
+{
+       eth_mac_addr(ndev, addr);
+       hip04_update_mac_address(ndev);
+       return 0;
+}
+
+static int hip04_tx_reclaim(struct net_device *ndev, bool force)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       unsigned tx_tail = priv->tx_tail;
+       struct tx_desc *desc;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
+       unsigned int count;
+
+       smp_rmb();
+       count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+       if (count == 0)
+               goto out;
+
+       while (count) {
+               desc = &priv->tx_desc[tx_tail];
+               if (desc->send_addr != 0) {
+                       if (force)
+                               desc->send_addr = 0;
+                       else
+                               break;
+               }
+
+               if (priv->tx_phys[tx_tail]) {
+                       dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+                                        priv->tx_skb[tx_tail]->len,
+                                        DMA_TO_DEVICE);
+                       priv->tx_phys[tx_tail] = 0;
+               }
+               pkts_compl++;
+               bytes_compl += priv->tx_skb[tx_tail]->len;
+               dev_kfree_skb(priv->tx_skb[tx_tail]);
+               priv->tx_skb[tx_tail] = NULL;
+               tx_tail = TX_NEXT(tx_tail);
+               count--;
+       }
+
+       priv->tx_tail = tx_tail;
+       smp_wmb(); /* Ensure tx_tail visible to xmit */
+
+out:
+       if (pkts_compl || bytes_compl)
+               netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+       if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
+               netif_wake_queue(ndev);
+
+       return count;
+}
+
+static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       unsigned int tx_head = priv->tx_head, count;
+       struct tx_desc *desc = &priv->tx_desc[tx_head];
+       dma_addr_t phys;
+
+       smp_rmb();
+       count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+       if (count == (TX_DESC_NUM - 1)) {
+               netif_stop_queue(ndev);
+               return NETDEV_TX_BUSY;
+       }
+
+       phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&ndev->dev, phys)) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       priv->tx_skb[tx_head] = skb;
+       priv->tx_phys[tx_head] = phys;
+       desc->send_addr = cpu_to_be32(phys);
+       desc->send_size = cpu_to_be32(skb->len);
+       desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+       phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
+       desc->wb_addr = cpu_to_be32(phys);
+       skb_tx_timestamp(skb);
+
+       hip04_set_xmit_desc(priv, phys);
+       priv->tx_head = TX_NEXT(tx_head);
+       count++;
+       netdev_sent_queue(ndev, skb->len);
+
+       stats->tx_bytes += skb->len;
+       stats->tx_packets++;
+
+       /* Ensure tx_head update visible to tx reclaim */
+       smp_wmb();
+
+       /* queue is getting full, better start cleaning up now */
+       if (count >= priv->tx_coalesce_frames) {
+               if (napi_schedule_prep(&priv->napi)) {
+                       /* disable rx interrupt and timer */
+                       priv->reg_inten &= ~(RCV_INT);
+                       writel_relaxed(DEF_INT_MASK & ~RCV_INT,
+                                      priv->base + PPE_INTEN);
+                       hrtimer_cancel(&priv->tx_coalesce_timer);
+                       __napi_schedule(&priv->napi);
+               }
+       } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
+               /* cleanup not pending yet, start a new timer */
+               hrtimer_start_expires(&priv->tx_coalesce_timer,
+                                     HRTIMER_MODE_REL);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static int hip04_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
+       struct net_device *ndev = priv->ndev;
+       struct net_device_stats *stats = &ndev->stats;
+       unsigned int cnt = hip04_recv_cnt(priv);
+       struct rx_desc *desc;
+       struct sk_buff *skb;
+       unsigned char *buf;
+       bool last = false;
+       dma_addr_t phys;
+       int rx = 0;
+       int tx_remaining;
+       u16 len;
+       u32 err;
+
+       while (cnt && !last) {
+               buf = priv->rx_buf[priv->rx_head];
+               skb = build_skb(buf, priv->rx_buf_size);
+               if (unlikely(!skb))
+                       net_dbg_ratelimited("build_skb failed\n");
+
+               dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+                                RX_BUF_SIZE, DMA_FROM_DEVICE);
+               priv->rx_phys[priv->rx_head] = 0;
+
+               desc = (struct rx_desc *)skb->data;
+               len = be16_to_cpu(desc->pkt_len);
+               err = be32_to_cpu(desc->pkt_err);
+
+               if (0 == len) {
+                       dev_kfree_skb_any(skb);
+                       last = true;
+               } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
+                       dev_kfree_skb_any(skb);
+                       stats->rx_dropped++;
+                       stats->rx_errors++;
+               } else {
+                       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+                       skb_put(skb, len);
+                       skb->protocol = eth_type_trans(skb, ndev);
+                       napi_gro_receive(&priv->napi, skb);
+                       stats->rx_packets++;
+                       stats->rx_bytes += len;
+                       rx++;
+               }
+
+               buf = netdev_alloc_frag(priv->rx_buf_size);
+               if (!buf)
+                       goto done;
+               phys = dma_map_single(&ndev->dev, buf,
+                                     RX_BUF_SIZE, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&ndev->dev, phys))
+                       goto done;
+               priv->rx_buf[priv->rx_head] = buf;
+               priv->rx_phys[priv->rx_head] = phys;
+               hip04_set_recv_desc(priv, phys);
+
+               priv->rx_head = RX_NEXT(priv->rx_head);
+               if (rx >= budget)
+                       goto done;
+
+               if (--cnt == 0)
+                       cnt = hip04_recv_cnt(priv);
+       }
+
+       if (!(priv->reg_inten & RCV_INT)) {
+               /* enable rx interrupt */
+               priv->reg_inten |= RCV_INT;
+               writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+       }
+       napi_complete(napi);
+done:
+       /* clean up tx descriptors and start a new timer if necessary */
+       tx_remaining = hip04_tx_reclaim(ndev, false);
+       if (rx < budget && tx_remaining)
+               hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);
+
+       return rx;
+}
+
+static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = (struct net_device *)dev_id;
+       struct hip04_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       u32 ists = readl_relaxed(priv->base + PPE_INTSTS);
+
+       if (!ists)
+               return IRQ_NONE;
+
+       writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);
+
+       if (unlikely(ists & DEF_INT_ERR)) {
+               if (ists & (RCV_NOBUF | RCV_DROP)) {
+                       stats->rx_errors++;
+                       stats->rx_dropped++;
+                       netdev_err(ndev, "rx drop\n");
+               }
+               if (ists & TX_DROP) {
+                       stats->tx_dropped++;
+                       netdev_err(ndev, "tx drop\n");
+               }
+       }
+
+       if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
+               /* disable rx interrupt */
+               priv->reg_inten &= ~(RCV_INT);
+               writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+               hrtimer_cancel(&priv->tx_coalesce_timer);
+               __napi_schedule(&priv->napi);
+       }
+
+       return IRQ_HANDLED;
+}
+
+enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
+{
+       struct hip04_priv *priv;
+
+       priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
+
+       if (napi_schedule_prep(&priv->napi)) {
+               /* disable rx interrupt */
+               priv->reg_inten &= ~(RCV_INT);
+               writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+               __napi_schedule(&priv->napi);
+       }
+
+       return HRTIMER_NORESTART;
+}
+
+static void hip04_adjust_link(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       struct phy_device *phy = priv->phy;
+
+       if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+               hip04_config_port(ndev, phy->speed, phy->duplex);
+               phy_print_status(phy);
+       }
+}
+
+static int hip04_mac_open(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       int i;
+
+       priv->rx_head = 0;
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+       hip04_reset_ppe(priv);
+
+       for (i = 0; i < RX_DESC_NUM; i++) {
+               dma_addr_t phys;
+
+               phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+                                     RX_BUF_SIZE, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&ndev->dev, phys))
+                       return -EIO;
+
+               priv->rx_phys[i] = phys;
+               hip04_set_recv_desc(priv, phys);
+       }
+
+       if (priv->phy)
+               phy_start(priv->phy);
+
+       netdev_reset_queue(ndev);
+       netif_start_queue(ndev);
+       hip04_mac_enable(ndev);
+       napi_enable(&priv->napi);
+
+       return 0;
+}
+
+static int hip04_mac_stop(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       int i;
+
+       napi_disable(&priv->napi);
+       netif_stop_queue(ndev);
+       hip04_mac_disable(ndev);
+       hip04_tx_reclaim(ndev, true);
+       hip04_reset_ppe(priv);
+
+       if (priv->phy)
+               phy_stop(priv->phy);
+
+       for (i = 0; i < RX_DESC_NUM; i++) {
+               if (priv->rx_phys[i]) {
+                       dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+                                        RX_BUF_SIZE, DMA_FROM_DEVICE);
+                       priv->rx_phys[i] = 0;
+               }
+       }
+
+       return 0;
+}
+
+static void hip04_timeout(struct net_device *ndev)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+
+       schedule_work(&priv->tx_timeout_task);
+}
+
+static void hip04_tx_timeout_task(struct work_struct *work)
+{
+       struct hip04_priv *priv;
+
+       priv = container_of(work, struct hip04_priv, tx_timeout_task);
+       hip04_mac_stop(priv->ndev);
+       hip04_mac_open(priv->ndev);
+}
+
+static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
+{
+       return &ndev->stats;
+}
+
+static int hip04_get_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *ec)
+{
+       struct hip04_priv *priv = netdev_priv(netdev);
+
+       ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
+       ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
+
+       return 0;
+}
+
+static int hip04_set_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *ec)
+{
+       struct hip04_priv *priv = netdev_priv(netdev);
+
+       /* Check not supported parameters  */
+       if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
+           (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
+           (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
+           (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
+           (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
+           (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
+           (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
+           (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
+           (ec->tx_max_coalesced_frames_irq) ||
+           (ec->stats_block_coalesce_usecs) ||
+           (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
+               return -EOPNOTSUPP;
+
+       if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
+            ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
+           (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
+            ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
+               return -EINVAL;
+
+       priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
+       priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
+
+       return 0;
+}
+
+static void hip04_get_drvinfo(struct net_device *netdev,
+                             struct ethtool_drvinfo *drvinfo)
+{
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+}
+
+static struct ethtool_ops hip04_ethtool_ops = {
+       .get_coalesce           = hip04_get_coalesce,
+       .set_coalesce           = hip04_set_coalesce,
+       .get_drvinfo            = hip04_get_drvinfo,
+};
+
+static struct net_device_ops hip04_netdev_ops = {
+       .ndo_open               = hip04_mac_open,
+       .ndo_stop               = hip04_mac_stop,
+       .ndo_get_stats          = hip04_get_stats,
+       .ndo_start_xmit         = hip04_mac_start_xmit,
+       .ndo_set_mac_address    = hip04_set_mac_address,
+       .ndo_tx_timeout         = hip04_timeout,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = eth_change_mtu,
+};
+
+static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       int i;
+
+       priv->tx_desc = dma_alloc_coherent(d,
+                                          TX_DESC_NUM * sizeof(struct tx_desc),
+                                          &priv->tx_desc_dma, GFP_KERNEL);
+       if (!priv->tx_desc)
+               return -ENOMEM;
+
+       priv->rx_buf_size = RX_BUF_SIZE +
+                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       for (i = 0; i < RX_DESC_NUM; i++) {
+               priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
+               if (!priv->rx_buf[i])
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void hip04_free_ring(struct net_device *ndev, struct device *d)
+{
+       struct hip04_priv *priv = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < RX_DESC_NUM; i++)
+               if (priv->rx_buf[i])
+                       put_page(virt_to_head_page(priv->rx_buf[i]));
+
+       for (i = 0; i < TX_DESC_NUM; i++)
+               if (priv->tx_skb[i])
+                       dev_kfree_skb_any(priv->tx_skb[i]);
+
+       dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
+                         priv->tx_desc, priv->tx_desc_dma);
+}
+
+static int hip04_mac_probe(struct platform_device *pdev)
+{
+       struct device *d = &pdev->dev;
+       struct device_node *node = d->of_node;
+       struct of_phandle_args arg;
+       struct net_device *ndev;
+       struct hip04_priv *priv;
+       struct resource *res;
+       unsigned int irq;
+       ktime_t txtime;
+       int ret;
+
+       ndev = alloc_etherdev(sizeof(struct hip04_priv));
+       if (!ndev)
+               return -ENOMEM;
+
+       priv = netdev_priv(ndev);
+       priv->ndev = ndev;
+       platform_set_drvdata(pdev, ndev);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(d, res);
+       if (IS_ERR(priv->base)) {
+               ret = PTR_ERR(priv->base);
+               goto init_fail;
+       }
+
+       ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
+       if (ret < 0) {
+               dev_warn(d, "no port-handle\n");
+               goto init_fail;
+       }
+
+       priv->port = arg.args[0];
+       priv->chan = arg.args[1] * RX_DESC_NUM;
+
+       hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+       /* BQL will try to keep the TX queue as short as possible, but it can't
+        * be faster than tx_coalesce_usecs, so we need a fast timeout here,
+        * but also long enough to gather up enough frames to ensure we don't
+        * get more interrupts than necessary.
+        * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
+        */
+       priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
+       priv->tx_coalesce_usecs = 200;
+       /* allow timer to fire after half the time at the earliest */
+       txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
+       hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
+       priv->tx_coalesce_timer.function = tx_done;
+
+       priv->map = syscon_node_to_regmap(arg.np);
+       if (IS_ERR(priv->map)) {
+               dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
+               ret = PTR_ERR(priv->map);
+               goto init_fail;
+       }
+
+       priv->phy_mode = of_get_phy_mode(node);
+       if (priv->phy_mode < 0) {
+               dev_warn(d, "not find phy-mode\n");
+               ret = -EINVAL;
+               goto init_fail;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               ret = -EINVAL;
+               goto init_fail;
+       }
+
+       ret = devm_request_irq(d, irq, hip04_mac_interrupt,
+                              0, pdev->name, ndev);
+       if (ret) {
+               netdev_err(ndev, "devm_request_irq failed\n");
+               goto init_fail;
+       }
+
+       priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+       if (priv->phy_node) {
+               priv->phy = of_phy_connect(ndev, priv->phy_node,
+                                          &hip04_adjust_link,
+                                          0, priv->phy_mode);
+               if (!priv->phy) {
+                       ret = -EPROBE_DEFER;
+                       goto init_fail;
+               }
+       }
+
+       INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
+
+       ether_setup(ndev);
+       ndev->netdev_ops = &hip04_netdev_ops;
+       ndev->ethtool_ops = &hip04_ethtool_ops;
+       ndev->watchdog_timeo = TX_TIMEOUT;
+       ndev->priv_flags |= IFF_UNICAST_FLT;
+       ndev->irq = irq;
+       netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       hip04_reset_ppe(priv);
+       if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
+               hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
+
+       hip04_config_fifo(priv);
+       random_ether_addr(ndev->dev_addr);
+       hip04_update_mac_address(ndev);
+
+       ret = hip04_alloc_ring(ndev, d);
+       if (ret) {
+               netdev_err(ndev, "alloc ring fail\n");
+               goto alloc_fail;
+       }
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               free_netdev(ndev);
+               goto alloc_fail;
+       }
+
+       return 0;
+
+alloc_fail:
+       hip04_free_ring(ndev, d);
+init_fail:
+       of_node_put(priv->phy_node);
+       free_netdev(ndev);
+       return ret;
+}
+
+static int hip04_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct hip04_priv *priv = netdev_priv(ndev);
+       struct device *d = &pdev->dev;
+
+       if (priv->phy)
+               phy_disconnect(priv->phy);
+
+       hip04_free_ring(ndev, d);
+       unregister_netdev(ndev);
+       free_irq(ndev->irq, ndev);
+       of_node_put(priv->phy_node);
+       cancel_work_sync(&priv->tx_timeout_task);
+       free_netdev(ndev);
+
+       return 0;
+}
+
+static const struct of_device_id hip04_mac_match[] = {
+       { .compatible = "hisilicon,hip04-mac" },
+       { }
+};
+
+MODULE_DEVICE_TABLE(of, hip04_mac_match);
+
+static struct platform_driver hip04_mac_driver = {
+       .probe  = hip04_mac_probe,
+       .remove = hip04_remove,
+       .driver = {
+               .name           = DRV_NAME,
+               .owner          = THIS_MODULE,
+               .of_match_table = hip04_mac_match,
+       },
+};
+module_platform_driver(hip04_mac_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
new file mode 100644 (file)
index 0000000..b3bac25
--- /dev/null
@@ -0,0 +1,186 @@
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+
+#define MDIO_CMD_REG           0x0
+#define MDIO_ADDR_REG          0x4
+#define MDIO_WDATA_REG         0x8
+#define MDIO_RDATA_REG         0xc
+#define MDIO_STA_REG           0x10
+
+#define MDIO_START             BIT(14)
+#define MDIO_R_VALID           BIT(1)
+#define MDIO_READ              (BIT(12) | BIT(11) | MDIO_START)
+#define MDIO_WRITE             (BIT(12) | BIT(10) | MDIO_START)
+
+struct hip04_mdio_priv {
+       void __iomem *base;
+};
+
+#define WAIT_TIMEOUT 10
+static int hip04_mdio_wait_ready(struct mii_bus *bus)
+{
+       struct hip04_mdio_priv *priv = bus->priv;
+       int i;
+
+       for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
+               if (i == WAIT_TIMEOUT)
+                       return -ETIMEDOUT;
+               msleep(20);
+       }
+
+       return 0;
+}
+
+static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+       struct hip04_mdio_priv *priv = bus->priv;
+       u32 val;
+       int ret;
+
+       ret = hip04_mdio_wait_ready(bus);
+       if (ret < 0)
+               goto out;
+
+       val = regnum | (mii_id << 5) | MDIO_READ;
+       writel_relaxed(val, priv->base + MDIO_CMD_REG);
+
+       ret = hip04_mdio_wait_ready(bus);
+       if (ret < 0)
+               goto out;
+
+       val = readl_relaxed(priv->base + MDIO_STA_REG);
+       if (val & MDIO_R_VALID) {
+               dev_err(bus->parent, "SMI bus read not valid\n");
+               ret = -ENODEV;
+               goto out;
+       }
+
+       val = readl_relaxed(priv->base + MDIO_RDATA_REG);
+       ret = val & 0xFFFF;
+out:
+       return ret;
+}
+
+static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
+                           int regnum, u16 value)
+{
+       struct hip04_mdio_priv *priv = bus->priv;
+       u32 val;
+       int ret;
+
+       ret = hip04_mdio_wait_ready(bus);
+       if (ret < 0)
+               goto out;
+
+       writel_relaxed(value, priv->base + MDIO_WDATA_REG);
+       val = regnum | (mii_id << 5) | MDIO_WRITE;
+       writel_relaxed(val, priv->base + MDIO_CMD_REG);
+out:
+       return ret;
+}
+
+static int hip04_mdio_reset(struct mii_bus *bus)
+{
+       int temp, i;
+
+       for (i = 0; i < PHY_MAX_ADDR; i++) {
+               hip04_mdio_write(bus, i, 22, 0);
+               temp = hip04_mdio_read(bus, i, MII_BMCR);
+               if (temp < 0)
+                       continue;
+
+               temp |= BMCR_RESET;
+               if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
+                       continue;
+       }
+
+       mdelay(500);
+       return 0;
+}
+
+static int hip04_mdio_probe(struct platform_device *pdev)
+{
+       struct resource *r;
+       struct mii_bus *bus;
+       struct hip04_mdio_priv *priv;
+       int ret;
+
+       bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
+       if (!bus) {
+               dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+               return -ENOMEM;
+       }
+
+       bus->name = "hip04_mdio_bus";
+       bus->read = hip04_mdio_read;
+       bus->write = hip04_mdio_write;
+       bus->reset = hip04_mdio_reset;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+       bus->parent = &pdev->dev;
+       priv = bus->priv;
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(priv->base)) {
+               ret = PTR_ERR(priv->base);
+               goto out_mdio;
+       }
+
+       ret = of_mdiobus_register(bus, pdev->dev.of_node);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+               goto out_mdio;
+       }
+
+       platform_set_drvdata(pdev, bus);
+
+       return 0;
+
+out_mdio:
+       mdiobus_free(bus);
+       return ret;
+}
+
+static int hip04_mdio_remove(struct platform_device *pdev)
+{
+       struct mii_bus *bus = platform_get_drvdata(pdev);
+
+       mdiobus_unregister(bus);
+       mdiobus_free(bus);
+
+       return 0;
+}
+
+static const struct of_device_id hip04_mdio_match[] = {
+       { .compatible = "hisilicon,hip04-mdio" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, hip04_mdio_match);
+
+static struct platform_driver hip04_mdio_driver = {
+       .probe = hip04_mdio_probe,
+       .remove = hip04_mdio_remove,
+       .driver = {
+               .name = "hip04-mdio",
+               .owner = THIS_MODULE,
+               .of_match_table = hip04_mdio_match,
+       },
+};
+
+module_platform_driver(hip04_mdio_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hip04-mdio");
index 566b17db135a306a2bb8eea0ce167a70e6e165f4..e8a1adb7a96255bf8da1baa87b29514527c2764d 100644 (file)
@@ -2064,9 +2064,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        memset(swqe, 0, SWQE_HEADER_SIZE);
        atomic_dec(&pr->swqe_avail);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
-               swqe->vlan_tag = vlan_tx_tag_get(skb);
+               swqe->vlan_tag = skb_vlan_tag_get(skb);
        }
 
        pr->tx_packets++;
index 9388a83818f2f408446654adef22cb7020d567ab..162762d1a12cb1ffcb34a2325150278650168c29 100644 (file)
@@ -2367,7 +2367,7 @@ static int emac_wait_deps(struct emac_instance *dev)
        err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
        for (i = 0; i < EMAC_DEP_COUNT; i++) {
                of_node_put(deps[i].node);
-               if (err && deps[i].ofdev)
+               if (err)
                        of_dev_put(deps[i].ofdev);
        }
        if (err == 0) {
index 5b8300a32bf5f5eb1df93d7262b22a00b7d77a9f..f4ff465584a082d76618be172ed05d559a0950de 100644 (file)
@@ -192,6 +192,17 @@ config IXGBE
          To compile this driver as a module, choose M here. The module
          will be called ixgbe.
 
+config IXGBE_VXLAN
+       bool "Virtual eXtensible Local Area Network Support"
+       default n
+       depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
+       ---help---
+         This allows one to create VXLAN virtual interfaces that provide
+         Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+         to tunnel virtual network infrastructure in virtualized environments.
+         Say Y here if you want to use Virtual eXtensible Local Area Network
+         (VXLAN) in the driver.
+
 config IXGBE_HWMON
        bool "Intel(R) 10GbE PCI Express adapters HWMON support"
        default y
@@ -281,6 +292,17 @@ config I40E_DCB
 
          If unsure, say N.
 
+config I40E_FCOE
+       bool "Fibre Channel over Ethernet (FCoE)"
+       default n
+       depends on I40E && DCB && FCOE
+       ---help---
+         Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
+         in the driver. This will create new netdev for exclusive FCoE
+         use with XL710 FCoE offloads enabled.
+
+         If unsure, say N.
+
 config I40EVF
        tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
        depends on PCI_MSI
index 781065eb5431c811f6fb37da81b6e6c12f495770..e9c3a87e5b115dc690ef2b81bbe16a5480dae5b1 100644 (file)
@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic)
                mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
        } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
           (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
-               !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+               (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
                /* enable/disable MDI/MDI-X auto-switching. */
                mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
                                nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
index b691eb4f63766b281786623f1b533cdcb4fb3a0d..4270ad2d4ddfa91f9e98a759959f67441b743a9f 100644 (file)
@@ -24,6 +24,7 @@
 /* ethtool support for e1000 */
 
 #include "e1000.h"
+#include <linux/jiffies.h>
 #include <linux/uaccess.h>
 
 enum {NETDEV_STATS, E1000_STATS};
@@ -1460,7 +1461,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
                        ret_val = 13; /* ret_val is the same as mis-compare */
                        break;
                }
-               if (jiffies >= (time + 2)) {
+               if (time_after_eq(jiffies, time + 2)) {
                        ret_val = 14; /* error code for time out error */
                        break;
                }
index 83140cbb5f0119d6ab478e3491a44c4813f152f7..7f997d36948f3e59621b3698b1a43b36166ecf93 100644 (file)
@@ -2977,7 +2977,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
                           struct e1000_tx_ring *tx_ring, int tx_flags,
                           int count)
 {
-       struct e1000_hw *hw = &adapter->hw;
        struct e1000_tx_desc *tx_desc = NULL;
        struct e1000_tx_buffer *buffer_info;
        u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3031,11 +3030,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
        wmb();
 
        tx_ring->next_to_use = i;
-       writel(i, hw->hw_addr + tx_ring->tdt);
-       /* we need this if more than one processor can write to our tail
-        * at a time, it synchronizes IO on IA64/Altix systems
-        */
-       mmiowb();
 }
 
 /* 82547 workaround to avoid controller hang in half-duplex environment.
@@ -3226,9 +3220,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= E1000_TX_FLAGS_VLAN;
-               tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= (skb_vlan_tag_get(skb) <<
+                            E1000_TX_FLAGS_VLAN_SHIFT);
        }
 
        first = tx_ring->next_to_use;
@@ -3263,6 +3258,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                /* Make sure there is space in the ring for the next send. */
                e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
 
+               if (!skb->xmit_more ||
+                   netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+                       writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
+                       /* we need this if more than one processor can write to
+                        * our tail at a time, it synchronizes IO on IA64/Altix
+                        * systems
+                        */
+                       mmiowb();
+               }
        } else {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
index 332a298e95b551f5d4197274fbb906308a00a0b0..1e8c40fd5c3d8fbc582dc19c4f71766023901968 100644 (file)
@@ -5444,16 +5444,6 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
        wmb();
 
        tx_ring->next_to_use = i;
-
-       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-               e1000e_update_tdt_wa(tx_ring, i);
-       else
-               writel(i, tx_ring->tail);
-
-       /* we need this if more than one processor can write to our tail
-        * at a time, it synchronizes IO on IA64/Altix systems
-        */
-       mmiowb();
 }
 
 #define MINIMUM_DHCP_PACKET_SIZE 282
@@ -5463,8 +5453,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
        struct e1000_hw *hw = &adapter->hw;
        u16 length, offset;
 
-       if (vlan_tx_tag_present(skb) &&
-           !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+       if (skb_vlan_tag_present(skb) &&
+           !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
              (adapter->hw.mng_cookie.status &
               E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
                return 0;
@@ -5603,9 +5593,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        if (e1000_maybe_stop_tx(tx_ring, count + 2))
                return NETDEV_TX_BUSY;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= E1000_TX_FLAGS_VLAN;
-               tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= (skb_vlan_tag_get(skb) <<
+                            E1000_TX_FLAGS_VLAN_SHIFT);
        }
 
        first = tx_ring->next_to_use;
@@ -5635,8 +5626,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
                             nr_frags);
        if (count) {
-               if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-                            !adapter->tx_hwtstamp_skb)) {
+               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+                   (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
+                   !adapter->tx_hwtstamp_skb) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
                        adapter->tx_hwtstamp_skb = skb_get(skb);
@@ -5653,6 +5645,21 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    (MAX_SKB_FRAGS *
                                     DIV_ROUND_UP(PAGE_SIZE,
                                                  adapter->tx_fifo_limit) + 2));
+
+               if (!skb->xmit_more ||
+                   netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+                       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+                               e1000e_update_tdt_wa(tx_ring,
+                                                    tx_ring->next_to_use);
+                       else
+                               writel(tx_ring->next_to_use, tx_ring->tail);
+
+                       /* we need this if more than one processor can write
+                        * to our tail at a time, it synchronizes IO on
+                        *IA64/Altix systems
+                        */
+                       mmiowb();
+               }
        } else {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
index eb088b129bc717ae786880b4d08d5d3260ddc1c7..84ab9eea2768406e2b3f9a22f946e7417160f795 100644 (file)
@@ -97,7 +97,6 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_page(page);
-               bi->page = NULL;
 
                rx_ring->rx_stats.alloc_failed++;
                return false;
@@ -147,8 +146,8 @@ void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
                        i -= rx_ring->count;
                }
 
-               /* clear the hdr_addr for the next_to_use descriptor */
-               rx_desc->q.hdr_addr = 0;
+               /* clear the status bits for the next_to_use descriptor */
+               rx_desc->d.staterr = 0;
 
                cleaned_count--;
        } while (cleaned_count);
@@ -194,7 +193,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
        /* transfer page from old buffer to new buffer */
-       memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer));
+       *new_buff = *old_buff;
 
        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -203,12 +202,17 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
                                         DMA_FROM_DEVICE);
 }
 
+static inline bool fm10k_page_is_reserved(struct page *page)
+{
+       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
                                    struct page *page,
                                    unsigned int truesize)
 {
        /* avoid re-using remote pages */
-       if (unlikely(page_to_nid(page) != numa_mem_id()))
+       if (unlikely(fm10k_page_is_reserved(page)))
                return false;
 
 #if (PAGE_SIZE < 8192)
@@ -218,22 +222,19 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 
        /* flip page offset to other buffer */
        rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
-
-       /* Even if we own the page, we are not allowed to use atomic_set()
-        * This would break get_page_unless_zero() users.
-        */
-       atomic_inc(&page->_count);
 #else
        /* move offset up to the next cache line */
        rx_buffer->page_offset += truesize;
 
        if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
                return false;
-
-       /* bump ref count on page before it is given to the stack */
-       get_page(page);
 #endif
 
+       /* Even if we own the page, we are not allowed to use atomic_set()
+        * This would break get_page_unless_zero() users.
+        */
+       atomic_inc(&page->_count);
+
        return true;
 }
 
@@ -270,12 +271,12 @@ static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
 
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-               /* we can reuse buffer as-is, just make sure it is local */
-               if (likely(page_to_nid(page) == numa_mem_id()))
+               /* page is not reserved, we can reuse buffer as-is */
+               if (likely(!fm10k_page_is_reserved(page)))
                        return true;
 
                /* this page cannot be reused so discard it */
-               put_page(page);
+               __free_page(page);
                return false;
        }
 
@@ -293,7 +294,6 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
        struct page *page;
 
        rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
-
        page = rx_buffer->page;
        prefetchw(page);
 
@@ -727,6 +727,12 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
        struct ethhdr *eth_hdr;
        u8 l4_hdr = 0;
 
+/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
+#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET       164
+       if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
+           FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
+               return 0;
+
        switch (vlan_get_protocol(skb)) {
        case htons(ETH_P_IP):
                l4_hdr = ip_hdr(skb)->protocol;
@@ -965,8 +971,8 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
        tx_desc = FM10K_TX_DESC(tx_ring, i);
 
        /* add HW VLAN tag */
-       if (vlan_tx_tag_present(skb))
-               tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+       if (skb_vlan_tag_present(skb))
+               tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
        else
                tx_desc->vlan = 0;
 
index 14a4ea795c01c58e9d123e816c85ea38fbb0fe60..9f5457c9e627620dfe421bc173c85b24fddb67a4 100644 (file)
@@ -1194,12 +1194,11 @@ static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
 {
        const enum fm10k_mbx_state state = mbx->state;
        const u32 *hdr = &mbx->mbx_hdr;
-       u16 head, tail;
+       u16 head;
        s32 err;
 
-       /* we will need to pull all of the fields for verification */
+       /* we will need to pull the header field for verification */
        head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
-       tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
 
        /* We should not be receiving disconnect if Rx is incomplete */
        if (mbx->pushed)
index 8811364b91cbed2bd8daee81911ad23c7e1bfb8f..cfde8bac1aeb2a23e4c447f786e06394f3ce285e 100644 (file)
@@ -609,7 +609,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        int err;
 
        if ((skb->protocol ==  htons(ETH_P_8021Q)) &&
-           !vlan_tx_tag_present(skb)) {
+           !skb_vlan_tag_present(skb)) {
                /* FM10K only supports hardware tagging, any tags in frame
                 * are considered 2nd level or "outer" tags
                 */
@@ -1414,13 +1414,12 @@ struct net_device *fm10k_alloc_netdev(void)
        dev->vlan_features |= dev->features;
 
        /* configure tunnel offloads */
-       dev->hw_enc_features = NETIF_F_IP_CSUM |
-                              NETIF_F_TSO |
-                              NETIF_F_TSO6 |
-                              NETIF_F_TSO_ECN |
-                              NETIF_F_GSO_UDP_TUNNEL |
-                              NETIF_F_IPV6_CSUM |
-                              NETIF_F_SG;
+       dev->hw_enc_features |= NETIF_F_IP_CSUM |
+                               NETIF_F_TSO |
+                               NETIF_F_TSO6 |
+                               NETIF_F_TSO_ECN |
+                               NETIF_F_GSO_UDP_TUNNEL |
+                               NETIF_F_IPV6_CSUM;
 
        /* we want to leave these both on as we cannot disable VLAN tag
         * insertion or stripping on the hardware since it is contained
index 275423d4f77778911b71145dfd165de6dcb8d2ca..7e4711958e463a959c69365fdb3d5c9ff956f3e4 100644 (file)
@@ -330,13 +330,10 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
        struct fm10k_mac_update mac_update;
        u32 msg[5];
 
-       /* if glort is not valid return error */
-       if (!fm10k_glort_valid_pf(hw, glort))
+       /* if glort or vlan are not valid return error */
+       if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
                return FM10K_ERR_PARAM;
 
-       /* drop upper 4 bits of VLAN ID */
-       vid = (vid << 4) >> 4;
-
        /* record fields */
        mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
                                                 ((u32)mac[3] << 16) |
index 7822809436a362069770c57db8c6e0f03cbf27ed..d966044e017af043fdc24770d4d642700949102b 100644 (file)
@@ -57,7 +57,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
        struct sk_buff_head *list = &interface->ts_tx_skb_queue;
        struct sk_buff *clone;
        unsigned long flags;
-       __le16 dglort;
 
        /* create clone for us to return on the Tx path */
        clone = skb_clone_sk(skb);
@@ -65,8 +64,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
                return;
 
        FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
-       dglort = FM10K_CB(clone)->fi.w.dglort;
-
        spin_lock_irqsave(&list->lock, flags);
 
        /* attempt to locate any buffers with the same dglort,
index 280296f291544b4f327fa563103fb903889f1fb4..7c6d9d5a8ae5c5042f8e68843a26ef770b45bbf5 100644 (file)
@@ -354,7 +354,7 @@ struct fm10k_hw;
 
 /* Define timeouts for resets and disables */
 #define FM10K_QUEUE_DISABLE_TIMEOUT            100
-#define FM10K_RESET_TIMEOUT                    100
+#define FM10K_RESET_TIMEOUT                    150
 
 /* VF registers */
 #define FM10K_VFCTRL           0x00000
index 4b94ddb29c248ed2571c4d90eb37d8c8ecbec935..c405819991214e21a25b5a670d0097482fd643ab 100644 (file)
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
        i40e_virtchnl_pf.o
 
 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
+i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
index fc50f6461b13e3bd2633eecf7b3a71538a432dc2..fadf8fa3cb7534530950538e46f5aab0ddb3b6fb 100644 (file)
@@ -92,6 +92,7 @@
 #define I40E_MAX_USER_PRIORITY        8
 #define I40E_DEFAULT_MSG_ENABLE       4
 #define I40E_QUEUE_WAIT_RETRY_LIMIT   10
+#define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -268,7 +269,7 @@ struct i40e_pf {
        u16 rx_itr_default;
        u16 tx_itr_default;
        u16 msg_enable;
-       char misc_int_name[IFNAMSIZ + 9];
+       char int_name[I40E_INT_NAME_STR_LEN];
        u16 adminq_work_limit; /* num of admin receive queue desc to process */
        unsigned long service_timer_period;
        unsigned long service_timer_previous;
@@ -524,7 +525,7 @@ struct i40e_q_vector {
 
        cpumask_t affinity_mask;
        struct rcu_head rcu;    /* to avoid race with update stats on free */
-       char name[IFNAMSIZ + 9];
+       char name[I40E_INT_NAME_STR_LEN];
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
index 564d0b0192f789aad0b4af4ceed04fe9fd15cd06..de17b6fbcc4e2a2f42054a875f961f9344a27d9a 100644 (file)
@@ -148,7 +148,7 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
 
 /* general information */
 #define I40E_AQ_LARGE_BUF      512
-#define I40E_ASQ_CMD_TIMEOUT   100  /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT   250  /* msecs */
 
 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode);
index 8835aeeff23e13f1d1d0c78f73ed46442b05c40c..929e3d72a01e5aa6901787fa708f046198af0a22 100644 (file)
@@ -256,6 +256,8 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_lldp_stop          = 0x0A05,
        i40e_aqc_opc_lldp_start         = 0x0A06,
        i40e_aqc_opc_get_cee_dcb_cfg    = 0x0A07,
+       i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
+       i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
 
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
@@ -268,6 +270,8 @@ enum i40e_admin_queue_opc {
        /* OEM commands */
        i40e_aqc_opc_oem_parameter_change       = 0xFE00,
        i40e_aqc_opc_oem_device_status_change   = 0xFE01,
+       i40e_aqc_opc_oem_ocsd_initialize        = 0xFE02,
+       i40e_aqc_opc_oem_ocbb_initialize        = 0xFE03,
 
        /* debug commands */
        i40e_aqc_opc_debug_get_deviceid         = 0xFF00,
@@ -276,7 +280,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_debug_write_reg            = 0xFF04,
        i40e_aqc_opc_debug_modify_reg           = 0xFF07,
        i40e_aqc_opc_debug_dump_internals       = 0xFF08,
-       i40e_aqc_opc_debug_modify_internals     = 0xFF09,
 };
 
 /* command structures and indirect data structures */
@@ -410,6 +413,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_VSI             0x0017
 #define I40E_AQ_CAP_ID_DCB             0x0018
 #define I40E_AQ_CAP_ID_FCOE            0x0021
+#define I40E_AQ_CAP_ID_ISCSI           0x0022
 #define I40E_AQ_CAP_ID_RSS             0x0040
 #define I40E_AQ_CAP_ID_RXQ             0x0041
 #define I40E_AQ_CAP_ID_TXQ             0x0042
@@ -454,8 +458,11 @@ struct i40e_aqc_arp_proxy_data {
        __le32  pfpm_proxyfc;
        __le32  ip_addr;
        u8      mac_addr[6];
+       u8      reserved[2];
 };
 
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
 /* Set NS Proxy Table Entry Command (indirect 0x0105) */
 struct i40e_aqc_ns_proxy_data {
        __le16  table_idx_mac_addr_0;
@@ -481,6 +488,8 @@ struct i40e_aqc_ns_proxy_data {
        u8      ipv6_addr_1[16];
 };
 
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
 /* Manage LAA Command (0x0106) - obsolete */
 struct i40e_aqc_mng_laa {
        __le16  command_flags;
@@ -491,6 +500,8 @@ struct i40e_aqc_mng_laa {
        u8      reserved2[6];
 };
 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
 /* Manage MAC Address Read Command (indirect 0x0107) */
 struct i40e_aqc_mac_address_read {
        __le16  command_flags;
@@ -562,6 +573,8 @@ struct i40e_aqc_get_switch_config_header_resp {
        u8      reserved[12];
 };
 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
 struct i40e_aqc_switch_config_element_resp {
        u8      element_type;
 #define I40E_AQ_SW_ELEM_TYPE_MAC       1
@@ -587,6 +600,8 @@ struct i40e_aqc_switch_config_element_resp {
        __le16  element_info;
 };
 
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
 /* Get Switch Configuration (indirect 0x0200)
  *    an array of elements are returned in the response buffer
  *    the first in the array is the header, remainder are elements
@@ -596,6 +611,8 @@ struct i40e_aqc_get_switch_config_resp {
        struct i40e_aqc_switch_config_element_resp      element[1];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
+
 /* Add Statistics (direct 0x0201)
  * Remove Statistics (direct 0x0202)
  */
@@ -661,6 +678,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
        u8      reserved2[6];
 };
 
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
 /* Add VSI (indirect 0x0210)
  *    this indirect command uses struct i40e_aqc_vsi_properties_data
  *    as the indirect buffer (128 bytes)
@@ -1092,6 +1111,8 @@ struct i40e_aqc_remove_tag {
        u8      reserved[12];
 };
 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
 /* Add multicast E-Tag (direct 0x0257)
  * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
  * and no external data
@@ -1207,7 +1228,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
        } ipaddr;
        __le16  flags;
 #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT                        0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK                 (0x3F << \
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
                                        I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
 /* 0x0000 reserved */
 #define I40E_AQC_ADD_CLOUD_FILTER_OIP                  0x0001
@@ -1240,7 +1261,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
        u8      reserved[4];
        __le16  queue_number;
 #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT         0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK          (0x3F << \
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK          (0x7FF << \
                                                 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
        u8      reserved2[14];
        /* response section */
@@ -1359,6 +1380,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
        u8      reserved1[28];
 };
 
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
 /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
  *    responds with i40e_aqc_qs_handles_resp
  */
@@ -1370,6 +1393,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
        __le16  qs_handles[8];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
 /* Query vsi bw configuration (indirect 0x0408) */
 struct i40e_aqc_query_vsi_bw_config_resp {
        u8      tc_valid_bits;
@@ -1383,6 +1408,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
        u8      reserved3[23];
 };
 
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
 /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
 struct i40e_aqc_query_vsi_ets_sla_config_resp {
        u8      tc_valid_bits;
@@ -1394,6 +1421,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
        __le16  tc_bw_max[2];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
 /* Configure Switching Component Bandwidth Limit (direct 0x0410) */
 struct i40e_aqc_configure_switching_comp_bw_limit {
        __le16  seid;
@@ -1421,6 +1450,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
        u8      reserved2[96];
 };
 
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
 /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
 struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
        u8      tc_valid_bits;
@@ -1432,6 +1463,9 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
        u8      reserved1[28];
 };
 
+I40E_CHECK_STRUCT_LEN(0x40,
+                     i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
 /* Configure Switching Component Bandwidth Allocation per Tc
  * (indirect 0x0417)
  */
@@ -1443,6 +1477,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
        u8      reserved1[20];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
 /* Query Switching Component Configuration (indirect 0x0418) */
 struct i40e_aqc_query_switching_comp_ets_config_resp {
        u8      tc_valid_bits;
@@ -1453,6 +1489,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
        u8      reserved2[23];
 };
 
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
 /* Query PhysicalPort ETS Configuration (indirect 0x0419) */
 struct i40e_aqc_query_port_ets_config_resp {
        u8      reserved[4];
@@ -1468,6 +1506,8 @@ struct i40e_aqc_query_port_ets_config_resp {
        u8      reserved3[32];
 };
 
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
 /* Query Switching Component Bandwidth Allocation per Traffic Type
  * (indirect 0x041A)
  */
@@ -1482,6 +1522,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
        __le16  tc_bw_max[2];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
 /* Suspend/resume port TX traffic
  * (direct 0x041B and 0x041C) uses the generic SEID struct
  */
@@ -1495,6 +1537,8 @@ struct i40e_aqc_configure_partition_bw_data {
        u8      max_bw[16];      /* bandwidth limit */
 };
 
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
 /* Get and set the active HMC resource profile and status.
  * (direct 0x0500) and (direct 0x0501)
  */
@@ -1577,6 +1621,8 @@ struct i40e_aqc_module_desc {
        u8 reserved2[8];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
 struct i40e_aq_get_phy_abilities_resp {
        __le32  phy_type;       /* bitmap using the above enum for offsets */
        u8      link_speed;     /* bitmap using the above enum bit patterns */
@@ -1605,6 +1651,8 @@ struct i40e_aq_get_phy_abilities_resp {
        struct i40e_aqc_module_desc     qualified_module[I40E_AQ_PHY_MAX_QMS];
 };
 
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
 /* Set PHY Config (direct 0x0601) */
 struct i40e_aq_set_phy_config { /* same bits as above in all */
        __le32  phy_type;
@@ -1788,12 +1836,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
 /* NVM Config Read (indirect 0x0704) */
 struct i40e_aqc_nvm_config_read {
        __le16  cmd_flags;
-#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK  1
-#define ANVM_READ_SINGLE_FEATURE               0
-#define ANVM_READ_MULTIPLE_FEATURES            1
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK  1 
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE               0 
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES            1
        __le16  element_count;
-       __le16  element_id; /* Feature/field ID */
-       u8      reserved[2];
+       __le16  element_id;     /* Feature/field ID */
+       __le16  element_id_msw; /* MSWord of field ID */
        __le32  address_high;
        __le32  address_low;
 };
@@ -1811,21 +1859,32 @@ struct i40e_aqc_nvm_config_write {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
 
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT                1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+                               (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE           0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD   (1 << FEATURE_OR_IMMEDIATE_SHIFT)
 struct i40e_aqc_nvm_config_data_feature {
        __le16 feature_id;
-       __le16 instance_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY           0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP          0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR            0x10
        __le16 feature_options;
        __le16 feature_selection;
 };
 
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
 struct i40e_aqc_nvm_config_data_immediate_field {
-#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
-       __le16 field_id;
-       __le16 instance_id;
+       __le32 field_id;
+       __le32 field_value;
        __le16 field_options;
-       __le16 field_value;
+       __le16 reserved;
 };
 
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
@@ -2026,12 +2085,54 @@ struct i40e_aqc_get_cee_dcb_cfg_resp {
        u8      oper_tc_bw[8];
        u8      oper_pfc_en;
        __le16  oper_app_prio;
+#define I40E_AQC_CEE_APP_FCOE_SHIFT    0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK     (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT   0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK    (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT     0x8
+#define I40E_AQC_CEE_APP_FIP_MASK      (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_MASK      (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
        __le32  tlv_status;
+#define I40E_AQC_CEE_PG_STATUS_SHIFT   0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK    (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT  0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK   (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT  0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK   (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
        u8      reserved[12];
 };
 
 I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
 
+/*     Set Local LLDP MIB (indirect 0x0A08)
+ *     Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ */
+struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT       0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK        (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+       u8      type;
+       u8      reserved0;
+       __le16  length;
+       u8      reserved1[4];
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
+
+/*     Stop/Start LLDP Agent (direct 0x0A09)
+ *     Used for stopping/starting specific LLDP agent. e.g. DCBx
+ */
+struct i40e_aqc_lldp_stop_start_specific_agent {
+#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT    0
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
+                               (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+       u8      command;
+       u8      reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
        __le16  udp_port;
@@ -2106,7 +2207,8 @@ struct i40e_aqc_oem_param_change {
 #define I40E_AQ_OEM_PARAM_TYPE_BW_CTL  1
 #define I40E_AQ_OEM_PARAM_MAC          2
        __le32  param_value1;
-       u8      param_value2[8];
+       __le16  param_value2;
+       u8      reserved[6];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2120,6 +2222,28 @@ struct i40e_aqc_oem_state_change {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
 
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+       u8 type_status;
+       u8 reserved1[3];
+       __le32 ocsd_memory_block_addr_high;
+       __le32 ocsd_memory_block_addr_low;
+       __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB  (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+       u8 type_status;
+       u8 reserved1[3];
+       __le32 ocbb_memory_block_addr_high;
+       __le32 ocbb_memory_block_addr_low;
+       u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
 /* debug commands */
 
 /* get device id (0xFF00) uses the generic structure */
index 3d741ee99a2cdea92c96efee39fda62c541c9b3e..5669bfa39f146bff7f52277468fbcd77348ee011 100644 (file)
@@ -741,6 +741,65 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 }
 #endif
 
+/**
+ *  i40e_read_pba_string - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ **/
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+                                u32 pba_num_size)
+{
+       i40e_status status = 0;
+       u16 pba_word = 0;
+       u16 pba_size = 0;
+       u16 pba_ptr = 0;
+       u16 i = 0;
+
+       status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+       if (status || (pba_word != 0xFAFA)) {
+               hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
+               return status;
+       }
+
+       status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+       if (status) {
+               hw_dbg(hw, "Failed to read PBA Block pointer.\n");
+               return status;
+       }
+
+       status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+       if (status) {
+               hw_dbg(hw, "Failed to read PBA Block size.\n");
+               return status;
+       }
+
+       /* Subtract one to get PBA word count (PBA Size word is included in
+        * total size)
+        */
+       pba_size--;
+       if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+               hw_dbg(hw, "Buffer to small for PBA data.\n");
+               return I40E_ERR_PARAM;
+       }
+
+       for (i = 0; i < pba_size; i++) {
+               status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+               if (status) {
+                       hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
+                       return status;
+               }
+
+               pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+               pba_num[(i * 2) + 1] = pba_word & 0xFF;
+       }
+       pba_num[(pba_size * 2)] = '\0';
+
+       return status;
+}
+
 /**
  * i40e_get_media_type - Gets media type
  * @hw: pointer to the hardware structure
@@ -2034,6 +2093,43 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
        return status;
 }
 
+/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+                               u32  reg_addr, u64 *reg_val,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_debug_reg_read_write *cmd_resp =
+               (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+       i40e_status status;
+
+       if (reg_val == NULL)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_debug_read_reg);
+
+       cmd_resp->address = cpu_to_le32(reg_addr);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status) {
+               *reg_val = ((u64)cmd_resp->value_high << 32) |
+                           (u64)cmd_resp->value_low;
+               *reg_val = le64_to_cpu(*reg_val);
+       }
+
+       return status;
+}
+
 /**
  * i40e_aq_debug_write_register
  * @hw: pointer to the hw struct
@@ -2264,6 +2360,7 @@ i40e_aq_erase_nvm_exit:
 #define I40E_DEV_FUNC_CAP_VSI          0x17
 #define I40E_DEV_FUNC_CAP_DCB          0x18
 #define I40E_DEV_FUNC_CAP_FCOE         0x21
+#define I40E_DEV_FUNC_CAP_ISCSI                0x22
 #define I40E_DEV_FUNC_CAP_RSS          0x40
 #define I40E_DEV_FUNC_CAP_RX_QUEUES    0x41
 #define I40E_DEV_FUNC_CAP_TX_QUEUES    0x42
@@ -2292,6 +2389,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                                     enum i40e_admin_queue_opc list_type_opc)
 {
        struct i40e_aqc_list_capabilities_element_resp *cap;
+       u32 valid_functions, num_functions;
        u32 number, logical_id, phys_id;
        struct i40e_hw_capabilities *p;
        u32 i = 0;
@@ -2362,6 +2460,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                        if (number == 1)
                                p->fcoe = true;
                        break;
+               case I40E_DEV_FUNC_CAP_ISCSI:
+                       if (number == 1)
+                               p->iscsi = true;
+                       break;
                case I40E_DEV_FUNC_CAP_RSS:
                        p->rss = true;
                        p->rss_table_size = number;
@@ -2427,6 +2529,34 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
        if (p->npar_enable || p->mfp_mode_1)
                p->fcoe = false;
 
+       /* count the enabled ports (aka the "not disabled" ports) */
+       hw->num_ports = 0;
+       for (i = 0; i < 4; i++) {
+               u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+               u64 port_cfg = 0;
+
+               /* use AQ read to get the physical register offset instead
+                * of the port relative offset
+                */
+               i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+               if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+                       hw->num_ports++;
+       }
+
+       valid_functions = p->valid_functions;
+       num_functions = 0;
+       while (valid_functions) {
+               if (valid_functions & 1)
+                       num_functions++;
+               valid_functions >>= 1;
+       }
+
+       /* partition id is 1-based, and functions are evenly spread
+        * across the ports as partitions
+        */
+       hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+       hw->num_partitions = num_functions / hw->num_ports;
+
        /* additional HW specific goodies that might
         * someday be HW version specific
         */
index 433a55886ad29bfb1b357d47ad4953f9ddac6145..61236f983971a1d55955586cd5a6ee288f5c60e7 100644 (file)
@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                if (desc_n >= ring->count || desc_n < 0) {
                        dev_info(&pf->pdev->dev,
                                 "descriptor %d not found\n", desc_n);
-                       return;
+                       goto out;
                }
                if (!is_rx_ring) {
                        txd = I40E_TX_DESC(ring, desc_n);
@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
        } else {
                dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
        }
+
+out:
        kfree(ring);
 }
 
@@ -1888,7 +1890,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
                dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
                dev_info(&pf->pdev->dev, "  dump desc aq\n");
-               dev_info(&pf->pdev->dev, "  dump stats\n");
                dev_info(&pf->pdev->dev, "  dump reset stats\n");
                dev_info(&pf->pdev->dev, "  msg_enable [level]\n");
                dev_info(&pf->pdev->dev, "  read <reg>\n");
index 951e8767fc502e57e91b40412dba09fa27f0e8c6..b8230dc205ec7f01a2c1e481097a8f9554420d15 100644 (file)
@@ -218,6 +218,16 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
 
+/**
+ * i40e_partition_setting_complaint - generic complaint for MFP restriction
+ * @pf: the PF struct
+ **/
+static void i40e_partition_setting_complaint(struct i40e_pf *pf)
+{
+       dev_info(&pf->pdev->dev,
+                "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
+}
+
 /**
  * i40e_get_settings - Get Link Speed and Duplex settings
  * @netdev: network interface device structure
@@ -485,6 +495,14 @@ static int i40e_set_settings(struct net_device *netdev,
        u8 autoneg;
        u32 advertise;
 
+       /* Changing port settings is not supported if this isn't the
+        * port's controlling PF
+        */
+       if (hw->partition_id != 1) {
+               i40e_partition_setting_complaint(pf);
+               return -EOPNOTSUPP;
+       }
+
        if (vsi != pf->vsi[pf->lan_vsi])
                return -EOPNOTSUPP;
 
@@ -687,6 +705,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
        u8 aq_failures;
        int err = 0;
 
+       /* Changing the port's flow control is not supported if this isn't the
+        * port's controlling PF
+        */
+       if (hw->partition_id != 1) {
+               i40e_partition_setting_complaint(pf);
+               return -EOPNOTSUPP;
+       }
+
        if (vsi != pf->vsi[pf->lan_vsi])
                return -EOPNOTSUPP;
 
@@ -1503,7 +1529,7 @@ static void i40e_get_wol(struct net_device *netdev,
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if ((1 << hw->port) & wol_nvm_bits) {
+       if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
                wol->supported = 0;
                wol->wolopts = 0;
        } else {
@@ -1512,13 +1538,28 @@ static void i40e_get_wol(struct net_device *netdev,
        }
 }
 
+/**
+ * i40e_set_wol - set the WakeOnLAN configuration
+ * @netdev: the netdev in question
+ * @wol: the ethtool WoL setting data
+ **/
 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = np->vsi;
        struct i40e_hw *hw = &pf->hw;
        u16 wol_nvm_bits;
 
+       /* WoL not supported if this isn't the controlling PF on the port */
+       if (hw->partition_id != 1) {
+               i40e_partition_setting_complaint(pf);
+               return -EOPNOTSUPP;
+       }
+
+       if (vsi != pf->vsi[pf->lan_vsi])
+               return -EOPNOTSUPP;
+
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
        if (((1 << hw->port) & wol_nvm_bits))
index a8b8bd95108dd587bd961d144af334fb175da7f7..2cd841b290591727e9eaec0d6426fa0e381e4ce6 100644 (file)
@@ -1515,8 +1515,6 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
        i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
        i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
        i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
-       i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
-       i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);
 
        /* use san mac */
        ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
index a5f2660d552d67b6f8c0123f1b63d7fc7ee4ad52..e774a23901f9c81efc5198ee1f6ba5175a10e478 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 6
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -2819,8 +2819,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
  * i40e_enable_misc_int_causes - enable the non-queue interrupts
  * @hw: ptr to the hardware info
  **/
-static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
+static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
 {
+       struct i40e_hw *hw = &pf->hw;
        u32 val;
 
        /* clear things first */
@@ -2832,11 +2833,13 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
              I40E_PFINT_ICR0_ENA_GRST_MASK          |
              I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
              I40E_PFINT_ICR0_ENA_GPIO_MASK          |
-             I40E_PFINT_ICR0_ENA_TIMESYNC_MASK      |
              I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
              I40E_PFINT_ICR0_ENA_VFLR_MASK          |
              I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
 
+       if (pf->flags & I40E_FLAG_PTP)
+               val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+
        wr32(hw, I40E_PFINT_ICR0_ENA, val);
 
        /* SW_ITR_IDX = 0, but don't change INTENA */
@@ -2866,7 +2869,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
        q_vector->tx.latency_range = I40E_LOW_LATENCY;
        wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
 
-       i40e_enable_misc_int_causes(hw);
+       i40e_enable_misc_int_causes(pf);
 
        /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
        wr32(hw, I40E_PFINT_LNKLST0, 0);
@@ -3402,10 +3405,10 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
                err = i40e_vsi_request_irq_msix(vsi, basename);
        else if (pf->flags & I40E_FLAG_MSI_ENABLED)
                err = request_irq(pf->pdev->irq, i40e_intr, 0,
-                                 pf->misc_int_name, pf);
+                                 pf->int_name, pf);
        else
                err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
-                                 pf->misc_int_name, pf);
+                                 pf->int_name, pf);
 
        if (err)
                dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
@@ -3998,6 +4001,35 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
 }
 
 #endif
+/**
+ * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
+ * @pf: pointer to pf
+ *
+ * Get TC map for ISCSI PF type that will include iSCSI TC
+ * and LAN TC.
+ **/
+static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
+{
+       struct i40e_dcb_app_priority_table app;
+       struct i40e_hw *hw = &pf->hw;
+       u8 enabled_tc = 1; /* TC0 is always enabled */
+       u8 tc, i;
+       /* Get the iSCSI APP TLV */
+       struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+       for (i = 0; i < dcbcfg->numapps; i++) {
+               app = dcbcfg->app[i];
+               if (app.selector == I40E_APP_SEL_TCPIP &&
+                   app.protocolid == I40E_APP_PROTOID_ISCSI) {
+                       tc = dcbcfg->etscfg.prioritytable[app.priority];
+                       enabled_tc |= (1 << tc);
+                       break;
+               }
+       }
+
+       return enabled_tc;
+}
+
 /**
  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
  * @dcbcfg: the corresponding DCBx configuration structure
@@ -4061,18 +4093,23 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
        if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
                return 1;
 
+       /* SFP mode will be enabled for all TCs on port */
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               return i40e_dcb_get_num_tc(dcbcfg);
+
        /* MFP mode return count of enabled TCs for this PF */
-       if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+       if (pf->hw.func_caps.iscsi)
+               enabled_tc =  i40e_get_iscsi_tc_map(pf);
+       else
                enabled_tc = pf->hw.func_caps.enabled_tcmap;
-               for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                       if (enabled_tc & (1 << i))
-                               num_tc++;
-               }
-               return num_tc;
-       }
 
-       /* SFP mode will be enabled for all TCs on port */
-       return i40e_dcb_get_num_tc(dcbcfg);
+       /* At least have TC0 */
+       enabled_tc = (enabled_tc ? enabled_tc : 0x1);
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (enabled_tc & (1 << i))
+                       num_tc++;
+       }
+       return num_tc;
 }
 
 /**
@@ -4110,12 +4147,15 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
        if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
                return i40e_pf_get_default_tc(pf);
 
-       /* MFP mode will have enabled TCs set by FW */
-       if (pf->flags & I40E_FLAG_MFP_ENABLED)
-               return pf->hw.func_caps.enabled_tcmap;
-
        /* SFP mode we want PF to be enabled for all TCs */
-       return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+
+       /* MPF enabled and iSCSI PF type */
+       if (pf->hw.func_caps.iscsi)
+               return i40e_get_iscsi_tc_map(pf);
+       else
+               return pf->hw.func_caps.enabled_tcmap;
 }
 
 /**
@@ -4505,9 +4545,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
        struct i40e_hw *hw = &pf->hw;
        int err = 0;
 
-       if (pf->hw.func_caps.npar_enable)
-               goto out;
-
        /* Get the initial DCB configuration */
        err = i40e_init_dcb(hw);
        if (!err) {
@@ -4533,7 +4570,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                                "DCBX offload is supported for this PF.\n");
                }
        } else {
-               dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+               dev_info(&pf->pdev->dev,
+                        "AQ Querying DCB configuration failed: aq_err %d\n",
                         pf->hw.aq.asq_last_status);
        }
 
@@ -4557,6 +4595,15 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
                return;
        }
 
+       /* Warn user if link speed on NPAR enabled partition is not at
+        * least 10GB
+        */
+       if (vsi->back->hw.func_caps.npar_enable &&
+           (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+            vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+               netdev_warn(vsi->netdev,
+                           "The partition detected link speed that is less than 10Gbps\n");
+
        switch (vsi->back->hw.phy.link_info.link_speed) {
        case I40E_LINK_SPEED_40GB:
                strlcpy(speed, "40 Gbps", SPEED_SIZE);
@@ -4836,7 +4883,7 @@ static int i40e_open(struct net_device *netdev)
 int i40e_vsi_open(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
-       char int_name[IFNAMSIZ];
+       char int_name[I40E_INT_NAME_STR_LEN];
        int err;
 
        /* allocate descriptors */
@@ -4870,7 +4917,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
                        goto err_set_queues;
 
        } else if (vsi->type == I40E_VSI_FDIR) {
-               snprintf(int_name, sizeof(int_name) - 1, "%s-%s-fdir",
+               snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
                         dev_driver_string(&pf->pdev->dev),
                         dev_name(&pf->pdev->dev));
                err = i40e_vsi_request_irq(vsi, int_name);
@@ -5494,14 +5541,18 @@ static void i40e_link_event(struct i40e_pf *pf)
 {
        bool new_link, old_link;
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       u8 new_link_speed, old_link_speed;
 
        /* set this to force the get_link_status call to refresh state */
        pf->hw.phy.get_link_info = true;
 
        old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
        new_link = i40e_get_link_status(&pf->hw);
+       old_link_speed = pf->hw.phy.link_info_old.link_speed;
+       new_link_speed = pf->hw.phy.link_info.link_speed;
 
        if (new_link == old_link &&
+           new_link_speed == old_link_speed &&
            (test_bit(__I40E_DOWN, &vsi->state) ||
             new_link == netif_carrier_ok(vsi->netdev)))
                return;
@@ -6175,8 +6226,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 #ifdef CONFIG_I40E_DCB
        ret = i40e_init_pf_dcb(pf);
        if (ret) {
-               dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
-               goto end_core_reset;
+               dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
+               pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+               /* Continue without DCB enabled */
        }
 #endif /* CONFIG_I40E_DCB */
 #ifdef I40E_FCOE
@@ -7113,16 +7165,16 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
         */
        if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
                err = request_irq(pf->msix_entries[0].vector,
-                                 i40e_intr, 0, pf->misc_int_name, pf);
+                                 i40e_intr, 0, pf->int_name, pf);
                if (err) {
                        dev_info(&pf->pdev->dev,
                                 "request_irq for %s failed: %d\n",
-                                pf->misc_int_name, err);
+                                pf->int_name, err);
                        return -EFAULT;
                }
        }
 
-       i40e_enable_misc_int_causes(hw);
+       i40e_enable_misc_int_causes(pf);
 
        /* associate no queues to the misc vector */
        wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
@@ -7306,7 +7358,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
 
 #endif /* I40E_FCOE */
 #ifdef CONFIG_PCI_IOV
-       if (pf->hw.func_caps.num_vfs) {
+       if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
                pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
                pf->flags |= I40E_FLAG_SRIOV_ENABLED;
                pf->num_req_vfs = min_t(int,
@@ -7766,7 +7818,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                enabled_tc = i40e_pf_get_tc_map(pf);
 
                /* MFP mode setup queue map and update VSI */
-               if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+               if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
+                   !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
                        memset(&ctxt, 0, sizeof(ctxt));
                        ctxt.seid = pf->main_vsi_seid;
                        ctxt.pf_num = pf->hw.pf_id;
@@ -7787,6 +7840,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        /* Default/Main VSI is only enabled for TC0
                         * reconfigure it to enable all TCs that are
                         * available on the port in SFP mode.
+                        * For MFP case the iSCSI PF would use this
+                        * flow to enable LAN+iSCSI TC.
                         */
                        ret = i40e_vsi_config_tc(vsi, enabled_tc);
                        if (ret) {
@@ -9164,7 +9219,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
        pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
 
-       snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
+       snprintf(pf->int_name, sizeof(pf->int_name) - 1,
                 "%s-%s:misc",
                 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
 
@@ -9227,6 +9282,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_configure_lan_hmc;
        }
 
+       /* Disable LLDP for NICs that have firmware versions lower than v4.3.
+        * Ignore error return codes because if it was already disabled via
+        * hardware settings this will fail
+        */
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
+           (pf->hw.aq.fw_maj_ver < 4)) {
+               dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
+               i40e_aq_stop_lldp(hw, true, NULL);
+       }
+
        i40e_get_mac_addr(hw, hw->mac.addr);
        if (!is_valid_ether_addr(hw->mac.addr)) {
                dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
@@ -9256,7 +9321,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 #ifdef CONFIG_I40E_DCB
        err = i40e_init_pf_dcb(pf);
        if (err) {
-               dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
+               dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
                pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
                /* Continue without DCB enabled */
        }
index 045b5c4b98b38ba74ef68104828351f2a8fdc67c..ad802dd0f67a3d4fdcb6810ebdc8d66b67706d66 100644 (file)
@@ -78,7 +78,7 @@ do {                                                            \
 } while (0)
 
 typedef enum i40e_status_code i40e_status;
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifdef CONFIG_I40E_FCOE
 #define I40E_FCOE
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif
 #endif /* _I40E_OSDEP_H_ */
index 2fb4306597e8222fbc7c84d535ed1a307af6d2cd..68e852a96680229818cb3f4d99dc4f69b529c33a 100644 (file)
@@ -71,6 +71,9 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
                                        u32 reg_addr, u64 reg_val,
                                        struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+                               u32  reg_addr, u64 *reg_val,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -245,6 +248,8 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw);
 bool i40e_get_link_status(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+                                u32 pba_num_size);
 i40e_status i40e_validate_mac_addr(u8 *mac_addr);
 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
 #ifdef I40E_FCOE
index 6d1ec926aa3713a6ebc40fa52aa2ee873f8292f4..fabcfa1b45b28817c0d2d3d1ed71c0c7e3054caf 100644 (file)
@@ -247,7 +247,12 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
        u32 prttsyn_stat;
        int n;
 
-       if (!(pf->flags & I40E_FLAG_PTP))
+       /* Since we cannot turn off the Rx timestamp logic if the device is
+        * configured for Tx timestamping, we check if Rx timestamping is
+        * configured. We don't want to spuriously warn about Rx timestamp
+        * hangs if we don't care about the timestamps.
+        */
+       if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
                return;
 
        prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
@@ -305,6 +310,13 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
        u32 hi, lo;
        u64 ns;
 
+       if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+               return;
+
+       /* don't attempt to timestamp if we don't have an skb */
+       if (!pf->ptp_tx_skb)
+               return;
+
        lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
        hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
 
@@ -338,7 +350,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
        /* Since we cannot turn off the Rx timestamp logic if the device is
         * doing Tx timestamping, check if Rx timestamping is configured.
         */
-       if (!pf->ptp_rx)
+       if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
                return;
 
        hw = &pf->hw;
@@ -467,7 +479,12 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
        switch (config->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                pf->ptp_rx = false;
-               tsyntype = 0;
+               /* We set the type to V1, but do not enable UDP packet
+                * recognition. In this way, we should be as close to
+                * disabling PTP Rx timestamps as possible since V1 packets
+                * are always UDP, since L2 packets are a V2 feature.
+                */
+               tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
@@ -521,17 +538,18 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
                regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
        wr32(hw, I40E_PFINT_ICR0_ENA, regval);
 
-       /* There is no simple on/off switch for Rx. To "disable" Rx support,
-        * ignore any received timestamps, rather than turn off the clock.
+       /* Although there is no simple on/off switch for Rx, we "disable" Rx
+        * timestamps by setting to V1 only mode and clear the UDP
+        * recognition. This ought to disable all PTP Rx timestamps as V1
+        * packets are always over UDP. Note that software is configured to
+        * ignore Rx timestamps via the pf->ptp_rx flag.
         */
-       if (pf->ptp_rx) {
-               regval = rd32(hw, I40E_PRTTSYN_CTL1);
-               /* clear everything but the enable bit */
-               regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
-               /* now enable bits for desired Rx timestamps */
-               regval |= tsyntype;
-               wr32(hw, I40E_PRTTSYN_CTL1, regval);
-       }
+       regval = rd32(hw, I40E_PRTTSYN_CTL1);
+       /* clear everything but the enable bit */
+       regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+       /* now enable bits for desired Rx timestamps */
+       regval |= tsyntype;
+       wr32(hw, I40E_PRTTSYN_CTL1, regval);
 
        return 0;
 }
index 04b441460bbda6e36cb64731d24247a1b954e506..420d66274d69fb7b9379bb064dc81161a5edefc8 100644 (file)
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
        return le32_to_cpu(*(volatile __le32 *)head);
 }
 
+#define WB_STRIDE 0x3
+
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
  * @tx_ring:  tx ring to clean
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
+       /* check to see if there are any non-cache aligned descriptors
+        * waiting to be written back, and kick the hardware to force
+        * them to be written back in case of napi polling
+        */
+       if (budget &&
+           !((i & WB_STRIDE) == WB_STRIDE) &&
+           !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+           (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+               tx_ring->arm_wb = true;
+       else
+               tx_ring->arm_wb = false;
+
        if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
                dev_info(tx_ring->dev,
-                        "tx hang detected on queue %d, resetting adapter\n",
+                        "tx hang detected on queue %d, reset requested\n",
                         tx_ring->queue_index);
 
-               tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+               /* do not fire the reset immediately, wait for the stack to
+                * decide we are truly stuck, also prevents every queue from
+                * simultaneously requesting a reset
+                */
 
-               /* the adapter is about to reset, no point in enabling stuff */
-               return true;
+               /* the adapter is about to reset, no point in enabling polling */
+               budget = 1;
        }
 
        netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                }
        }
 
-       return budget > 0;
+       return !!budget;
+}
+
+/**
+ * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector  on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+       u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
+                 /* allow 00 to be written to the index */;
+
+       wr32(&vsi->back->hw,
+            I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+            val);
 }
 
 /**
@@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * so the total length of IPv4 header is IHL*4 bytes
         * The UDP_0 bit *may* bet set if the *inner* header is UDP
         */
-       if (ipv4_tunnel &&
-           (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
-           !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+       if (ipv4_tunnel) {
                skb->transport_header = skb->mac_header +
                                        sizeof(struct ethhdr) +
                                        (ip_hdr(skb)->ihl * 4);
@@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                          skb->protocol == htons(ETH_P_8021AD))
                                          ? VLAN_HLEN : 0;
 
-               rx_udp_csum = udp_csum(skb);
-               iph = ip_hdr(skb);
-               csum = csum_tcpudp_magic(
-                               iph->saddr, iph->daddr,
-                               (skb->len - skb_transport_offset(skb)),
-                               IPPROTO_UDP, rx_udp_csum);
+               if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+                   (udp_hdr(skb)->check != 0)) {
+                       rx_udp_csum = udp_csum(skb);
+                       iph = ip_hdr(skb);
+                       csum = csum_tcpudp_magic(
+                                       iph->saddr, iph->daddr,
+                                       (skb->len - skb_transport_offset(skb)),
+                                       IPPROTO_UDP, rx_udp_csum);
+
+                       if (udp_hdr(skb)->check != csum)
+                               goto checksum_fail;
 
-               if (udp_hdr(skb)->check != csum)
-                       goto checksum_fail;
+               } /* else its GRE and so no outer UDP header */
        }
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        struct i40e_vsi *vsi = q_vector->vsi;
        struct i40e_ring *ring;
        bool clean_complete = true;
+       bool arm_wb = false;
        int budget_per_ring;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        /* Since the actual Tx work is minimal, we can give the Tx a larger
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
-       i40e_for_each_ring(ring, q_vector->tx)
+       i40e_for_each_ring(ring, q_vector->tx) {
                clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+               arm_wb |= ring->arm_wb;
+       }
 
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
 
        /* If work not completed, return budget and polling will return */
-       if (!clean_complete)
+       if (!clean_complete) {
+               if (arm_wb)
+                       i40e_force_wb(vsi, q_vector);
                return budget;
+       }
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
@@ -1772,8 +1815,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        u32  tx_flags = 0;
 
        /* if we have a HW VLAN tag being added, default to the HW one */
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN, check the next protocol and store the tag */
        } else if (protocol == htons(ETH_P_8021Q)) {
@@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       if (protocol == htons(ETH_P_IP)) {
-               iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+       if (iph->version == 4) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
-       } else if (skb_is_gso_v6(skb)) {
-
-               ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-                                          : ipv6_hdr(skb);
+       } else if (ipv6h->version == 6) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                ipv6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1897,6 +1939,9 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
         * we are not already transmitting a packet to be timestamped
         */
        pf = i40e_netdev_to_pf(tx_ring->netdev);
+       if (!(pf->flags & I40E_FLAG_PTP))
+               return 0;
+
        if (pf->ptp_tx &&
            !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -1946,13 +1991,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
-                               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       if (tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
-                       } else {
-                               *cd_tunneling |=
-                                        I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-                       }
                }
 
                /* Now set the ctx descriptor fields */
@@ -1962,7 +2003,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
+               if (this_ip_hdr->version == 6) {
+                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       tx_flags |= I40E_TX_FLAGS_IPV6;
+               }
        } else {
                network_hdr_len = skb_network_header_len(skb);
                this_ip_hdr = ip_hdr(skb);
@@ -2198,7 +2242,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        /* Place RS bit on last descriptor of any packet that spans across the
         * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
         */
-#define WB_STRIDE 0x3
        if (((i & WB_STRIDE) != WB_STRIDE) &&
            (first <= &tx_ring->tx_bi[i]) &&
            (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
index e60d3accb2e2ec3f2992b056da718d633b978157..18b00231d2f117d714e7e1399aecba0061ead41a 100644 (file)
@@ -241,6 +241,7 @@ struct i40e_ring {
        unsigned long last_rx_timestamp;
 
        bool ring_active;               /* is ring online or not */
+       bool arm_wb;            /* do something to arm write back */
 
        /* stats structs */
        struct i40e_queue_stats stats;
index c1f2eb96335771fd9eae58623bd98a90132cbe41..e9901ef06a6361f9e2a1f56004764eb51ed60513 100644 (file)
@@ -211,6 +211,7 @@ struct i40e_hw_capabilities {
        bool evb_802_1_qbh; /* Bridge Port Extension */
        bool dcb;
        bool fcoe;
+       bool iscsi; /* Indicates iSCSI enabled */
        bool mfp_mode_1;
        bool mgmt_cem;
        bool ieee_1588;
@@ -431,7 +432,7 @@ struct i40e_hw {
        u8 __iomem *hw_addr;
        void *back;
 
-       /* function pointer structs */
+       /* subsystem structs */
        struct i40e_phy_info phy;
        struct i40e_mac_info mac;
        struct i40e_bus_info bus;
@@ -458,6 +459,11 @@ struct i40e_hw {
        u8  pf_id;
        u16 main_vsi_seid;
 
+       /* for multi-function MACs */
+       u16 partition_id;
+       u16 num_partitions;
+       u16 num_ports;
+
        /* Closest numa node to the device */
        u16 numa_node;
 
@@ -1135,6 +1141,8 @@ struct i40e_hw_port_stats {
 /* Checksum and Shadow RAM pointers */
 #define I40E_SR_NVM_CONTROL_WORD               0x00
 #define I40E_SR_EMP_MODULE_PTR                 0x0F
+#define I40E_SR_PBA_FLAGS                      0x15
+#define I40E_SR_PBA_BLOCK_PTR                  0x16
 #define I40E_SR_NVM_IMAGE_VERSION              0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
index 5bae89550657c19e9c5eeae08b13615c9744f158..044019b9d406148c1e3990572cf6fcdff9cc3bdd 100644 (file)
@@ -791,10 +791,18 @@ void i40e_free_vfs(struct i40e_pf *pf)
        if (!pf->vf)
                return;
 
+       /* Disable IOV before freeing resources. This lets any VF drivers
+        * running in the host get themselves cleaned up before we yank
+        * the carpet out from underneath their feet.
+        */
+       if (!pci_vfs_assigned(pf->pdev))
+               pci_disable_sriov(pf->pdev);
+
+       msleep(20); /* let any messages in transit get finished up */
+
        /* Disable interrupt 0 so we don't try to handle the VFLR. */
        i40e_irq_dynamic_disable_icr0(pf);
 
-       mdelay(10); /* let any messages in transit get finished up */
        /* free up vf resources */
        tmp = pf->num_alloc_vfs;
        pf->num_alloc_vfs = 0;
@@ -813,7 +821,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
         * before this function ever gets called.
         */
        if (!pci_vfs_assigned(pf->pdev)) {
-               pci_disable_sriov(pf->pdev);
                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
                 * work correctly when SR-IOV gets re-enabled.
                 */
index 6c31bf22c2c31f88e996a402c2a4156cd9239f99..60f04e96a80e0a440faacf2c620777992259426b 100644 (file)
@@ -148,7 +148,7 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
 
 /* general information */
 #define I40E_AQ_LARGE_BUF      512
-#define I40E_ASQ_CMD_TIMEOUT   100  /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT   250  /* msecs */
 
 void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode);
index ff1b16370da93231854ef076ccf8cb2a99e25299..e715bccfb5d256c055a99163a1fd0d177c190754 100644 (file)
@@ -268,6 +268,8 @@ enum i40e_admin_queue_opc {
        /* OEM commands */
        i40e_aqc_opc_oem_parameter_change       = 0xFE00,
        i40e_aqc_opc_oem_device_status_change   = 0xFE01,
+       i40e_aqc_opc_oem_ocsd_initialize        = 0xFE02,
+       i40e_aqc_opc_oem_ocbb_initialize        = 0xFE03,
 
        /* debug commands */
        i40e_aqc_opc_debug_get_deviceid         = 0xFF00,
@@ -276,7 +278,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_debug_write_reg            = 0xFF04,
        i40e_aqc_opc_debug_modify_reg           = 0xFF07,
        i40e_aqc_opc_debug_dump_internals       = 0xFF08,
-       i40e_aqc_opc_debug_modify_internals     = 0xFF09,
 };
 
 /* command structures and indirect data structures */
@@ -410,6 +411,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_VSI             0x0017
 #define I40E_AQ_CAP_ID_DCB             0x0018
 #define I40E_AQ_CAP_ID_FCOE            0x0021
+#define I40E_AQ_CAP_ID_ISCSI           0x0022
 #define I40E_AQ_CAP_ID_RSS             0x0040
 #define I40E_AQ_CAP_ID_RXQ             0x0041
 #define I40E_AQ_CAP_ID_TXQ             0x0042
@@ -454,8 +456,11 @@ struct i40e_aqc_arp_proxy_data {
        __le32  pfpm_proxyfc;
        __le32  ip_addr;
        u8      mac_addr[6];
+       u8      reserved[2];
 };
 
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
 /* Set NS Proxy Table Entry Command (indirect 0x0105) */
 struct i40e_aqc_ns_proxy_data {
        __le16  table_idx_mac_addr_0;
@@ -481,6 +486,8 @@ struct i40e_aqc_ns_proxy_data {
        u8      ipv6_addr_1[16];
 };
 
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
 /* Manage LAA Command (0x0106) - obsolete */
 struct i40e_aqc_mng_laa {
        __le16  command_flags;
@@ -491,6 +498,8 @@ struct i40e_aqc_mng_laa {
        u8      reserved2[6];
 };
 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
 /* Manage MAC Address Read Command (indirect 0x0107) */
 struct i40e_aqc_mac_address_read {
        __le16  command_flags;
@@ -562,6 +571,8 @@ struct i40e_aqc_get_switch_config_header_resp {
        u8      reserved[12];
 };
 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
 struct i40e_aqc_switch_config_element_resp {
        u8      element_type;
 #define I40E_AQ_SW_ELEM_TYPE_MAC       1
@@ -587,6 +598,8 @@ struct i40e_aqc_switch_config_element_resp {
        __le16  element_info;
 };
 
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
 /* Get Switch Configuration (indirect 0x0200)
  *    an array of elements are returned in the response buffer
  *    the first in the array is the header, remainder are elements
@@ -596,6 +609,8 @@ struct i40e_aqc_get_switch_config_resp {
        struct i40e_aqc_switch_config_element_resp      element[1];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
+
 /* Add Statistics (direct 0x0201)
  * Remove Statistics (direct 0x0202)
  */
@@ -661,6 +676,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
        u8      reserved2[6];
 };
 
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
 /* Add VSI (indirect 0x0210)
  *    this indirect command uses struct i40e_aqc_vsi_properties_data
  *    as the indirect buffer (128 bytes)
@@ -1092,6 +1109,8 @@ struct i40e_aqc_remove_tag {
        u8      reserved[12];
 };
 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
 /* Add multicast E-Tag (direct 0x0257)
  * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
  * and no external data
@@ -1207,7 +1226,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
        } ipaddr;
        __le16  flags;
 #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT                        0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK                 (0x3F << \
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
                                        I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
 /* 0x0000 reserved */
 #define I40E_AQC_ADD_CLOUD_FILTER_OIP                  0x0001
@@ -1240,7 +1259,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
        u8      reserved[4];
        __le16  queue_number;
 #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT         0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK          (0x3F << \
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK          (0x7FF << \
                                                 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
        u8      reserved2[14];
        /* response section */
@@ -1359,6 +1378,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
        u8      reserved1[28];
 };
 
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
 /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
  *    responds with i40e_aqc_qs_handles_resp
  */
@@ -1370,6 +1391,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
        __le16  qs_handles[8];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
 /* Query vsi bw configuration (indirect 0x0408) */
 struct i40e_aqc_query_vsi_bw_config_resp {
        u8      tc_valid_bits;
@@ -1383,6 +1406,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
        u8      reserved3[23];
 };
 
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
 /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
 struct i40e_aqc_query_vsi_ets_sla_config_resp {
        u8      tc_valid_bits;
@@ -1394,6 +1419,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
        __le16  tc_bw_max[2];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
 /* Configure Switching Component Bandwidth Limit (direct 0x0410) */
 struct i40e_aqc_configure_switching_comp_bw_limit {
        __le16  seid;
@@ -1421,6 +1448,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
        u8      reserved2[96];
 };
 
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
 /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
 struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
        u8      tc_valid_bits;
@@ -1432,6 +1461,9 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
        u8      reserved1[28];
 };
 
+I40E_CHECK_STRUCT_LEN(0x40,
+                     i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
 /* Configure Switching Component Bandwidth Allocation per Tc
  * (indirect 0x0417)
  */
@@ -1443,6 +1475,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
        u8      reserved1[20];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
 /* Query Switching Component Configuration (indirect 0x0418) */
 struct i40e_aqc_query_switching_comp_ets_config_resp {
        u8      tc_valid_bits;
@@ -1453,6 +1487,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
        u8      reserved2[23];
 };
 
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
 /* Query PhysicalPort ETS Configuration (indirect 0x0419) */
 struct i40e_aqc_query_port_ets_config_resp {
        u8      reserved[4];
@@ -1468,6 +1504,8 @@ struct i40e_aqc_query_port_ets_config_resp {
        u8      reserved3[32];
 };
 
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
 /* Query Switching Component Bandwidth Allocation per Traffic Type
  * (indirect 0x041A)
  */
@@ -1482,6 +1520,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
        __le16  tc_bw_max[2];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
 /* Suspend/resume port TX traffic
  * (direct 0x041B and 0x041C) uses the generic SEID struct
  */
@@ -1495,6 +1535,8 @@ struct i40e_aqc_configure_partition_bw_data {
        u8      max_bw[16];      /* bandwidth limit */
 };
 
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
 /* Get and set the active HMC resource profile and status.
  * (direct 0x0500) and (direct 0x0501)
  */
@@ -1577,6 +1619,8 @@ struct i40e_aqc_module_desc {
        u8 reserved2[8];
 };
 
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
 struct i40e_aq_get_phy_abilities_resp {
        __le32  phy_type;       /* bitmap using the above enum for offsets */
        u8      link_speed;     /* bitmap using the above enum bit patterns */
@@ -1605,6 +1649,8 @@ struct i40e_aq_get_phy_abilities_resp {
        struct i40e_aqc_module_desc     qualified_module[I40E_AQ_PHY_MAX_QMS];
 };
 
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
 /* Set PHY Config (direct 0x0601) */
 struct i40e_aq_set_phy_config { /* same bits as above in all */
        __le32  phy_type;
@@ -1788,12 +1834,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
 /* NVM Config Read (indirect 0x0704) */
 struct i40e_aqc_nvm_config_read {
        __le16  cmd_flags;
-#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK  1
-#define ANVM_READ_SINGLE_FEATURE               0
-#define ANVM_READ_MULTIPLE_FEATURES            1
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK  1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE               0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES            1
        __le16  element_count;
-       __le16  element_id; /* Feature/field ID */
-       u8      reserved[2];
+       __le16  element_id;     /* Feature/field ID */
+       __le16  element_id_msw; /* MSWord of field ID */
        __le32  address_high;
        __le32  address_low;
 };
@@ -1811,21 +1857,32 @@ struct i40e_aqc_nvm_config_write {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
 
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT                1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+                               (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE           0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD   (1 << FEATURE_OR_IMMEDIATE_SHIFT)
 struct i40e_aqc_nvm_config_data_feature {
        __le16 feature_id;
-       __le16 instance_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY           0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP          0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR            0x10
        __le16 feature_options;
        __le16 feature_selection;
 };
 
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
 struct i40e_aqc_nvm_config_data_immediate_field {
-#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
-       __le16 field_id;
-       __le16 instance_id;
+       __le32 field_id;
+       __le32 field_value;
        __le16 field_options;
-       __le16 field_value;
+       __le16 reserved;
 };
 
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
@@ -2082,7 +2139,8 @@ struct i40e_aqc_oem_param_change {
 #define I40E_AQ_OEM_PARAM_TYPE_BW_CTL  1
 #define I40E_AQ_OEM_PARAM_MAC          2
        __le32  param_value1;
-       u8      param_value2[8];
+       __le16  param_value2;
+       u8      reserved[6];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2096,6 +2154,28 @@ struct i40e_aqc_oem_state_change {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
 
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+       u8 type_status;
+       u8 reserved1[3];
+       __le32 ocsd_memory_block_addr_high;
+       __le32 ocsd_memory_block_addr_low;
+       __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB  (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+       u8 type_status;
+       u8 reserved1[3];
+       __le32 ocbb_memory_block_addr_high;
+       __le32 ocbb_memory_block_addr_low;
+       u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
 /* debug commands */
 
 /* get device id (0xFF00) uses the generic structure */
index 04c7c1557a0c770ab0f256d3012cd3b9be70240e..82c3798fdd369a561ab074f3392a63608dc09db0 100644 (file)
@@ -1122,8 +1122,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        u32  tx_flags = 0;
 
        /* if we have a HW VLAN tag being added, default to the HW one */
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN, check the next protocol and store the tag */
        } else if (protocol == htons(ETH_P_8021Q)) {
index 68aec11f652335c42463066677775200dbbcb333..3d0fdaab5cc8404e242b8ecfb0e12e12198f41cc 100644 (file)
@@ -211,6 +211,7 @@ struct i40e_hw_capabilities {
        bool evb_802_1_qbh; /* Bridge Port Extension */
        bool dcb;
        bool fcoe;
+       bool iscsi; /* Indicates iSCSI enabled */
        bool mfp_mode_1;
        bool mgmt_cem;
        bool ieee_1588;
@@ -425,7 +426,7 @@ struct i40e_hw {
        u8 __iomem *hw_addr;
        void *back;
 
-       /* function pointer structs */
+       /* subsystem structs */
        struct i40e_phy_info phy;
        struct i40e_mac_info mac;
        struct i40e_bus_info bus;
@@ -452,6 +453,11 @@ struct i40e_hw {
        u8  pf_id;
        u16 main_vsi_seid;
 
+       /* for multi-function MACs */
+       u16 partition_id;
+       u16 num_partitions;
+       u16 num_ports;
+
        /* Closest numa node to the device */
        u16 numa_node;
 
index cabaf599f562c3bc4aec6c332f801f98387b64ca..f946aac1df71a79665e3d587730962af9e2b1553 100644 (file)
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.0.6"
+#define DRV_VERSION "1.2.0"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -313,10 +313,6 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
        val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_VFINT_DYN_CTL01, val);
 
-       /* re-enable interrupt causes */
-       wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
-       wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
-
        /* schedule work on the private workqueue */
        schedule_work(&adapter->adminq_task);
 
@@ -946,30 +942,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
        return 0;
 }
 
-/**
- * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
-{
-       int i;
-
-       for (i = 0; i < adapter->num_active_queues; i++)
-               i40evf_clean_rx_ring(adapter->rx_rings[i]);
-}
-
-/**
- * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
-{
-       int i;
-
-       for (i = 0; i < adapter->num_active_queues; i++)
-               i40evf_clean_tx_ring(adapter->tx_rings[i]);
-}
-
 /**
  * i40e_down - Shutdown the connection processing
  * @adapter: board private structure
@@ -982,6 +954,12 @@ void i40evf_down(struct i40evf_adapter *adapter)
        if (adapter->state == __I40EVF_DOWN)
                return;
 
+       while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+                               &adapter->crit_section))
+               usleep_range(500, 1000);
+
+       i40evf_irq_disable(adapter);
+
        /* remove all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->remove = true;
@@ -992,25 +970,27 @@ void i40evf_down(struct i40evf_adapter *adapter)
        }
        if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
            adapter->state != __I40EVF_RESETTING) {
-               adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+               /* cancel any current operation */
+               adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+               adapter->aq_pending = 0;
+               /* Schedule operations to close down the HW. Don't wait
+                * here for this to complete. The watchdog is still running
+                * and it will take care of this.
+                */
+               adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
                adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
-               /* disable receives */
                adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
-               mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
-               msleep(20);
        }
        netif_tx_disable(netdev);
 
        netif_tx_stop_all_queues(netdev);
 
-       i40evf_irq_disable(adapter);
-
        i40evf_napi_disable_all(adapter);
 
-       netif_carrier_off(netdev);
+       msleep(20);
 
-       i40evf_clean_all_tx_rings(adapter);
-       i40evf_clean_all_rx_rings(adapter);
+       netif_carrier_off(netdev);
+       clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
 
 /**
@@ -1356,8 +1336,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
        /* Process admin queue tasks. After init, everything gets done
         * here so we don't race on the admin queue.
         */
-       if (adapter->aq_pending)
+       if (adapter->aq_pending) {
+               if (!i40evf_asq_done(hw)) {
+                       dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
+                       i40evf_send_api_ver(adapter);
+               }
                goto watchdog_done;
+       }
 
        if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
                i40evf_map_queues(adapter);
@@ -1401,11 +1386,14 @@ static void i40evf_watchdog_task(struct work_struct *work)
 
        if (adapter->state == __I40EVF_RUNNING)
                i40evf_request_stats(adapter);
-
-       i40evf_irq_enable(adapter, true);
-       i40evf_fire_sw_int(adapter, 0xFF);
-
 watchdog_done:
+       if (adapter->state == __I40EVF_RUNNING) {
+               i40evf_irq_enable_queues(adapter, ~0);
+               i40evf_fire_sw_int(adapter, 0xFF);
+       } else {
+               i40evf_fire_sw_int(adapter, 0x1);
+       }
+
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 restart_watchdog:
        if (adapter->state == __I40EVF_REMOVE)
@@ -1633,12 +1621,12 @@ static void i40evf_adminq_task(struct work_struct *work)
        u16 pending;
 
        if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
-               return;
+               goto out;
 
        event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
        event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
        if (!event.msg_buf)
-               return;
+               goto out;
 
        v_msg = (struct i40e_virtchnl_msg *)&event.desc;
        do {
@@ -1688,10 +1676,10 @@ static void i40evf_adminq_task(struct work_struct *work)
        if (oldval != val)
                wr32(hw, hw->aq.asq.len, val);
 
+       kfree(event.msg_buf);
+out:
        /* re-enable Admin queue interrupt cause */
        i40evf_misc_irq_enable(adapter);
-
-       kfree(event.msg_buf);
 }
 
 /**
@@ -2053,12 +2041,8 @@ static void i40evf_init_task(struct work_struct *work)
                /* aq msg sent, awaiting reply */
                err = i40evf_verify_api_ver(adapter);
                if (err) {
-                       dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
-                                err);
-                       if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
-                               dev_info(&pdev->dev, "Resending request\n");
+                       if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                                err = i40evf_send_api_ver(adapter);
-                       }
                        goto err;
                }
                err = i40evf_send_vf_config_msg(adapter);
@@ -2081,7 +2065,6 @@ static void i40evf_init_task(struct work_struct *work)
                }
                err = i40evf_get_vf_config(adapter);
                if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
-                       dev_info(&pdev->dev, "Resending VF config request\n");
                        err = i40evf_send_vf_config_msg(adapter);
                        goto err;
                }
@@ -2440,6 +2423,7 @@ static void i40evf_remove(struct pci_dev *pdev)
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        struct i40evf_mac_filter *f, *ftmp;
        struct i40e_hw *hw = &adapter->hw;
+       int count = 50;
 
        cancel_delayed_work_sync(&adapter->init_task);
        cancel_work_sync(&adapter->reset_task);
@@ -2448,6 +2432,11 @@ static void i40evf_remove(struct pci_dev *pdev)
                unregister_netdev(netdev);
                adapter->netdev_registered = false;
        }
+       while (count-- && adapter->aq_required)
+               msleep(50);
+
+       if (count < 0)
+               dev_err(&pdev->dev, "Timed out waiting for PF driver.\n");
        adapter->state = __I40EVF_REMOVE;
 
        if (adapter->msix_entries) {
@@ -2477,6 +2466,10 @@ static void i40evf_remove(struct pci_dev *pdev)
                list_del(&f->list);
                kfree(f);
        }
+       list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+               list_del(&f->list);
+               kfree(f);
+       }
 
        free_netdev(netdev);
 
index 5fde5a7f4591053497d26d2423cf031fc6a9523a..3f0c85ecbca68c997aeec7bd53f66bc7c1736d95 100644 (file)
@@ -715,14 +715,14 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                }
                return;
        }
-       if (v_opcode != adapter->current_op)
-               dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
-                        adapter->current_op, v_opcode);
        if (v_retval) {
                dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
                        __func__, v_retval, v_opcode);
        }
        switch (v_opcode) {
+       case I40E_VIRTCHNL_OP_VERSION:
+               /* no action, but also not an error */
+               break;
        case I40E_VIRTCHNL_OP_GET_STATS: {
                struct i40e_eth_stats *stats =
                        (struct i40e_eth_stats *)msg;
index 051ea94bdcd3e8046181b361d8985c51d15ea19c..0f69ef81751a3d8154db558cc8f3d11e882928a0 100644 (file)
@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
        u32 swmask = mask;
        u32 fwmask = mask << 16;
        s32 ret_val = 0;
-       s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+       s32 i = 0, timeout = 200;
 
        while (i < timeout) {
                if (igb_get_hw_semaphore(hw)) {
index ee22da391474275c77380e6268c7a7038e04e641..c2bd4f98a8376ecab82b99623ce2a9454ed148a9 100644 (file)
@@ -343,6 +343,9 @@ struct hwmon_buff {
        };
 #endif
 
+#define IGB_N_EXTTS    2
+#define IGB_N_PEROUT   2
+#define IGB_N_SDP      4
 #define IGB_RETA_SIZE  128
 
 /* board specific private data structure */
@@ -439,6 +442,12 @@ struct igb_adapter {
        u32 tx_hwtstamp_timeouts;
        u32 rx_hwtstamp_cleared;
 
+       struct ptp_pin_desc sdp_config[IGB_N_SDP];
+       struct {
+               struct timespec start;
+               struct timespec period;
+       } perout[IGB_N_PEROUT];
+
        char fw_version[32];
 #ifdef CONFIG_IGB_HWMON
        struct hwmon_buff *igb_hwmon_buff;
index ff59897a946363442d657e84784962c6a600a45b..f366b3b96d03db4cd54351bad599d425520d4df7 100644 (file)
@@ -5035,9 +5035,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 
        skb_tx_timestamp(skb);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= IGB_TX_FLAGS_VLAN;
-               tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
        }
 
        /* record initial flags and protocol */
@@ -5384,6 +5384,80 @@ void igb_update_stats(struct igb_adapter *adapter,
        }
 }
 
+static void igb_tsync_interrupt(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct ptp_clock_event event;
+       struct timespec ts;
+       u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
+
+       if (tsicr & TSINTR_SYS_WRAP) {
+               event.type = PTP_CLOCK_PPS;
+               if (adapter->ptp_caps.pps)
+                       ptp_clock_event(adapter->ptp_clock, &event);
+               else
+                       dev_err(&adapter->pdev->dev, "unexpected SYS WRAP");
+               ack |= TSINTR_SYS_WRAP;
+       }
+
+       if (tsicr & E1000_TSICR_TXTS) {
+               /* retrieve hardware timestamp */
+               schedule_work(&adapter->ptp_tx_work);
+               ack |= E1000_TSICR_TXTS;
+       }
+
+       if (tsicr & TSINTR_TT0) {
+               spin_lock(&adapter->tmreg_lock);
+               ts = timespec_add(adapter->perout[0].start,
+                                 adapter->perout[0].period);
+               wr32(E1000_TRGTTIML0, ts.tv_nsec);
+               wr32(E1000_TRGTTIMH0, ts.tv_sec);
+               tsauxc = rd32(E1000_TSAUXC);
+               tsauxc |= TSAUXC_EN_TT0;
+               wr32(E1000_TSAUXC, tsauxc);
+               adapter->perout[0].start = ts;
+               spin_unlock(&adapter->tmreg_lock);
+               ack |= TSINTR_TT0;
+       }
+
+       if (tsicr & TSINTR_TT1) {
+               spin_lock(&adapter->tmreg_lock);
+               ts = timespec_add(adapter->perout[1].start,
+                                 adapter->perout[1].period);
+               wr32(E1000_TRGTTIML1, ts.tv_nsec);
+               wr32(E1000_TRGTTIMH1, ts.tv_sec);
+               tsauxc = rd32(E1000_TSAUXC);
+               tsauxc |= TSAUXC_EN_TT1;
+               wr32(E1000_TSAUXC, tsauxc);
+               adapter->perout[1].start = ts;
+               spin_unlock(&adapter->tmreg_lock);
+               ack |= TSINTR_TT1;
+       }
+
+       if (tsicr & TSINTR_AUTT0) {
+               nsec = rd32(E1000_AUXSTMPL0);
+               sec  = rd32(E1000_AUXSTMPH0);
+               event.type = PTP_CLOCK_EXTTS;
+               event.index = 0;
+               event.timestamp = sec * 1000000000ULL + nsec;
+               ptp_clock_event(adapter->ptp_clock, &event);
+               ack |= TSINTR_AUTT0;
+       }
+
+       if (tsicr & TSINTR_AUTT1) {
+               nsec = rd32(E1000_AUXSTMPL1);
+               sec  = rd32(E1000_AUXSTMPH1);
+               event.type = PTP_CLOCK_EXTTS;
+               event.index = 1;
+               event.timestamp = sec * 1000000000ULL + nsec;
+               ptp_clock_event(adapter->ptp_clock, &event);
+               ack |= TSINTR_AUTT1;
+       }
+
+       /* acknowledge the interrupts */
+       wr32(E1000_TSICR, ack);
+}
+
 static irqreturn_t igb_msix_other(int irq, void *data)
 {
        struct igb_adapter *adapter = data;
@@ -5415,16 +5489,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       if (icr & E1000_ICR_TS) {
-               u32 tsicr = rd32(E1000_TSICR);
-
-               if (tsicr & E1000_TSICR_TXTS) {
-                       /* acknowledge the interrupt */
-                       wr32(E1000_TSICR, E1000_TSICR_TXTS);
-                       /* retrieve hardware timestamp */
-                       schedule_work(&adapter->ptp_tx_work);
-               }
-       }
+       if (icr & E1000_ICR_TS)
+               igb_tsync_interrupt(adapter);
 
        wr32(E1000_EIMS, adapter->eims_other);
 
@@ -6011,8 +6077,12 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
        adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
 
        /* reply to reset with ack and vf mac address */
-       msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
-       memcpy(addr, vf_mac, ETH_ALEN);
+       if (!is_zero_ether_addr(vf_mac)) {
+               msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+               memcpy(addr, vf_mac, ETH_ALEN);
+       } else {
+               msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
+       }
        igb_write_mbx(hw, msgbuf, 3, vf);
 }
 
@@ -6203,16 +6273,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       if (icr & E1000_ICR_TS) {
-               u32 tsicr = rd32(E1000_TSICR);
-
-               if (tsicr & E1000_TSICR_TXTS) {
-                       /* acknowledge the interrupt */
-                       wr32(E1000_TSICR, E1000_TSICR_TXTS);
-                       /* retrieve hardware timestamp */
-                       schedule_work(&adapter->ptp_tx_work);
-               }
-       }
+       if (icr & E1000_ICR_TS)
+               igb_tsync_interrupt(adapter);
 
        napi_schedule(&q_vector->napi);
 
@@ -6257,16 +6319,8 @@ static irqreturn_t igb_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       if (icr & E1000_ICR_TS) {
-               u32 tsicr = rd32(E1000_TSICR);
-
-               if (tsicr & E1000_TSICR_TXTS) {
-                       /* acknowledge the interrupt */
-                       wr32(E1000_TSICR, E1000_TSICR_TXTS);
-                       /* retrieve hardware timestamp */
-                       schedule_work(&adapter->ptp_tx_work);
-               }
-       }
+       if (icr & E1000_ICR_TS)
+               igb_tsync_interrupt(adapter);
 
        napi_schedule(&q_vector->napi);
 
@@ -6527,15 +6581,17 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
                                         DMA_FROM_DEVICE);
 }
 
+static inline bool igb_page_is_reserved(struct page *page)
+{
+       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
                                  struct page *page,
                                  unsigned int truesize)
 {
        /* avoid re-using remote pages */
-       if (unlikely(page_to_nid(page) != numa_node_id()))
-               return false;
-
-       if (unlikely(page->pfmemalloc))
+       if (unlikely(igb_page_is_reserved(page)))
                return false;
 
 #if (PAGE_SIZE < 8192)
@@ -6545,22 +6601,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 
        /* flip page offset to other buffer */
        rx_buffer->page_offset ^= IGB_RX_BUFSZ;
-
-       /* Even if we own the page, we are not allowed to use atomic_set()
-        * This would break get_page_unless_zero() users.
-        */
-       atomic_inc(&page->_count);
 #else
        /* move offset up to the next cache line */
        rx_buffer->page_offset += truesize;
 
        if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
                return false;
-
-       /* bump ref count on page before it is given to the stack */
-       get_page(page);
 #endif
 
+       /* Even if we own the page, we are not allowed to use atomic_set()
+        * This would break get_page_unless_zero() users.
+        */
+       atomic_inc(&page->_count);
+
        return true;
 }
 
@@ -6603,13 +6656,12 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-               /* we can reuse buffer as-is, just make sure it is local */
-               if (likely((page_to_nid(page) == numa_node_id()) &&
-                          !page->pfmemalloc))
+               /* page is not reserved, we can reuse buffer as-is */
+               if (likely(!igb_page_is_reserved(page)))
                        return true;
 
                /* this page cannot be reused so discard it */
-               put_page(page);
+               __free_page(page);
                return false;
        }
 
@@ -6627,7 +6679,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
        struct page *page;
 
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-
        page = rx_buffer->page;
        prefetchw(page);
 
@@ -7042,8 +7093,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
                        i -= rx_ring->count;
                }
 
-               /* clear the hdr_addr for the next_to_use descriptor */
-               rx_desc->read.hdr_addr = 0;
+               /* clear the status bits for the next_to_use descriptor */
+               rx_desc->wb.upper.status_error = 0;
 
                cleaned_count--;
        } while (cleaned_count);
index 5e7a4e30a7b639b30622e509ce1423ecb2339091..d20fc8ed11f1574a2ae0fa4649be23fe204e1308 100644 (file)
@@ -355,12 +355,239 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
        return 0;
 }
 
+static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
+{
+       u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
+       u32 mask[IGB_N_SDP] = {
+               E1000_CTRL_SDP0_DIR,
+               E1000_CTRL_SDP1_DIR,
+               E1000_CTRL_EXT_SDP2_DIR,
+               E1000_CTRL_EXT_SDP3_DIR,
+       };
+
+       if (input)
+               *ptr &= ~mask[pin];
+       else
+               *ptr |= mask[pin];
+}
+
+static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
+{
+       struct e1000_hw *hw = &igb->hw;
+       u32 aux0_sel_sdp[IGB_N_SDP] = {
+               AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
+       };
+       u32 aux1_sel_sdp[IGB_N_SDP] = {
+               AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
+       };
+       u32 ts_sdp_en[IGB_N_SDP] = {
+               TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
+       };
+       u32 ctrl, ctrl_ext, tssdp = 0;
+
+       ctrl = rd32(E1000_CTRL);
+       ctrl_ext = rd32(E1000_CTRL_EXT);
+       tssdp = rd32(E1000_TSSDP);
+
+       igb_pin_direction(pin, 1, &ctrl, &ctrl_ext);
+
+       /* Make sure this pin is not enabled as an output. */
+       tssdp &= ~ts_sdp_en[pin];
+
+       if (chan == 1) {
+               tssdp &= ~AUX1_SEL_SDP3;
+               tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN;
+       } else {
+               tssdp &= ~AUX0_SEL_SDP3;
+               tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN;
+       }
+
+       wr32(E1000_TSSDP, tssdp);
+       wr32(E1000_CTRL, ctrl);
+       wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+{
+       struct e1000_hw *hw = &igb->hw;
+       u32 aux0_sel_sdp[IGB_N_SDP] = {
+               AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
+       };
+       u32 aux1_sel_sdp[IGB_N_SDP] = {
+               AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
+       };
+       u32 ts_sdp_en[IGB_N_SDP] = {
+               TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
+       };
+       u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
+               TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
+               TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
+       };
+       u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
+               TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
+               TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
+       };
+       u32 ts_sdp_sel_clr[IGB_N_SDP] = {
+               TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+               TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+       };
+       u32 ctrl, ctrl_ext, tssdp = 0;
+
+       ctrl = rd32(E1000_CTRL);
+       ctrl_ext = rd32(E1000_CTRL_EXT);
+       tssdp = rd32(E1000_TSSDP);
+
+       igb_pin_direction(pin, 0, &ctrl, &ctrl_ext);
+
+       /* Make sure this pin is not enabled as an input. */
+       if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin])
+               tssdp &= ~AUX0_TS_SDP_EN;
+
+       if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin])
+               tssdp &= ~AUX1_TS_SDP_EN;
+
+       tssdp &= ~ts_sdp_sel_clr[pin];
+       if (chan == 1)
+               tssdp |= ts_sdp_sel_tt1[pin];
+       else
+               tssdp |= ts_sdp_sel_tt0[pin];
+
+       tssdp |= ts_sdp_en[pin];
+
+       wr32(E1000_TSSDP, tssdp);
+       wr32(E1000_CTRL, ctrl);
+       wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
+                                      struct ptp_clock_request *rq, int on)
+{
+       struct igb_adapter *igb =
+               container_of(ptp, struct igb_adapter, ptp_caps);
+       struct e1000_hw *hw = &igb->hw;
+       u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+       unsigned long flags;
+       struct timespec ts;
+       int pin;
+       s64 ns;
+
+       switch (rq->type) {
+       case PTP_CLK_REQ_EXTTS:
+               if (on) {
+                       pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
+                                          rq->extts.index);
+                       if (pin < 0)
+                               return -EBUSY;
+               }
+               if (rq->extts.index == 1) {
+                       tsauxc_mask = TSAUXC_EN_TS1;
+                       tsim_mask = TSINTR_AUTT1;
+               } else {
+                       tsauxc_mask = TSAUXC_EN_TS0;
+                       tsim_mask = TSINTR_AUTT0;
+               }
+               spin_lock_irqsave(&igb->tmreg_lock, flags);
+               tsauxc = rd32(E1000_TSAUXC);
+               tsim = rd32(E1000_TSIM);
+               if (on) {
+                       igb_pin_extts(igb, rq->extts.index, pin);
+                       tsauxc |= tsauxc_mask;
+                       tsim |= tsim_mask;
+               } else {
+                       tsauxc &= ~tsauxc_mask;
+                       tsim &= ~tsim_mask;
+               }
+               wr32(E1000_TSAUXC, tsauxc);
+               wr32(E1000_TSIM, tsim);
+               spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+               return 0;
+
+       case PTP_CLK_REQ_PEROUT:
+               if (on) {
+                       pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
+                                          rq->perout.index);
+                       if (pin < 0)
+                               return -EBUSY;
+               }
+               ts.tv_sec = rq->perout.period.sec;
+               ts.tv_nsec = rq->perout.period.nsec;
+               ns = timespec_to_ns(&ts);
+               ns = ns >> 1;
+               if (on && ns < 500000LL) {
+                       /* 2k interrupts per second is an awful lot. */
+                       return -EINVAL;
+               }
+               ts = ns_to_timespec(ns);
+               if (rq->perout.index == 1) {
+                       tsauxc_mask = TSAUXC_EN_TT1;
+                       tsim_mask = TSINTR_TT1;
+                       trgttiml = E1000_TRGTTIML1;
+                       trgttimh = E1000_TRGTTIMH1;
+               } else {
+                       tsauxc_mask = TSAUXC_EN_TT0;
+                       tsim_mask = TSINTR_TT0;
+                       trgttiml = E1000_TRGTTIML0;
+                       trgttimh = E1000_TRGTTIMH0;
+               }
+               spin_lock_irqsave(&igb->tmreg_lock, flags);
+               tsauxc = rd32(E1000_TSAUXC);
+               tsim = rd32(E1000_TSIM);
+               if (on) {
+                       int i = rq->perout.index;
+
+                       igb_pin_perout(igb, i, pin);
+                       igb->perout[i].start.tv_sec = rq->perout.start.sec;
+                       igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
+                       igb->perout[i].period.tv_sec = ts.tv_sec;
+                       igb->perout[i].period.tv_nsec = ts.tv_nsec;
+                       wr32(trgttiml, rq->perout.start.sec);
+                       wr32(trgttimh, rq->perout.start.nsec);
+                       tsauxc |= tsauxc_mask;
+                       tsim |= tsim_mask;
+               } else {
+                       tsauxc &= ~tsauxc_mask;
+                       tsim &= ~tsim_mask;
+               }
+               wr32(E1000_TSAUXC, tsauxc);
+               wr32(E1000_TSIM, tsim);
+               spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+               return 0;
+
+       case PTP_CLK_REQ_PPS:
+               spin_lock_irqsave(&igb->tmreg_lock, flags);
+               tsim = rd32(E1000_TSIM);
+               if (on)
+                       tsim |= TSINTR_SYS_WRAP;
+               else
+                       tsim &= ~TSINTR_SYS_WRAP;
+               wr32(E1000_TSIM, tsim);
+               spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+               return 0;
+       }
+
+       return -EOPNOTSUPP;
+}
+
 static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
                                  struct ptp_clock_request *rq, int on)
 {
        return -EOPNOTSUPP;
 }
 
+static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+                             enum ptp_pin_function func, unsigned int chan)
+{
+       switch (func) {
+       case PTP_PF_NONE:
+       case PTP_PF_EXTTS:
+       case PTP_PF_PEROUT:
+               break;
+       case PTP_PF_PHYSYNC:
+               return -1;
+       }
+       return 0;
+}
+
 /**
  * igb_ptp_tx_work
  * @work: pointer to work struct
@@ -751,6 +978,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
+       int i;
 
        switch (hw->mac.type) {
        case e1000_82576:
@@ -793,16 +1021,27 @@ void igb_ptp_init(struct igb_adapter *adapter)
                break;
        case e1000_i210:
        case e1000_i211:
+               for (i = 0; i < IGB_N_SDP; i++) {
+                       struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
+
+                       snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
+                       ppd->index = i;
+                       ppd->func = PTP_PF_NONE;
+               }
                snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
                adapter->ptp_caps.owner = THIS_MODULE;
                adapter->ptp_caps.max_adj = 62499999;
-               adapter->ptp_caps.n_ext_ts = 0;
-               adapter->ptp_caps.pps = 0;
+               adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
+               adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
+               adapter->ptp_caps.n_pins = IGB_N_SDP;
+               adapter->ptp_caps.pps = 1;
+               adapter->ptp_caps.pin_config = adapter->sdp_config;
                adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
                adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
                adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
                adapter->ptp_caps.settime = igb_ptp_settime_i210;
-               adapter->ptp_caps.enable = igb_ptp_feature_enable;
+               adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
+               adapter->ptp_caps.verify = igb_ptp_verify_pin;
                /* Enable the timer functions by clearing bit 31. */
                wr32(E1000_TSAUXC, 0x0);
                break;
@@ -900,6 +1139,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
 void igb_ptp_reset(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
+       unsigned long flags;
 
        if (!(adapter->flags & IGB_FLAG_PTP))
                return;
@@ -907,6 +1147,8 @@ void igb_ptp_reset(struct igb_adapter *adapter)
        /* reset the tstamp_config */
        igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
 
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* Dial the nominal frequency. */
@@ -917,23 +1159,25 @@ void igb_ptp_reset(struct igb_adapter *adapter)
        case e1000_i350:
        case e1000_i210:
        case e1000_i211:
-               /* Enable the timer functions and interrupts. */
                wr32(E1000_TSAUXC, 0x0);
+               wr32(E1000_TSSDP, 0x0);
                wr32(E1000_TSIM, TSYNC_INTERRUPTS);
                wr32(E1000_IMS, E1000_IMS_TS);
                break;
        default:
                /* No work to do. */
-               return;
+               goto out;
        }
 
        /* Re-initialize the timer. */
        if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
                struct timespec ts = ktime_to_timespec(ktime_get_real());
 
-               igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+               igb_ptp_write_i210(adapter, &ts);
        } else {
                timecounter_init(&adapter->tc, &adapter->cc,
                                 ktime_to_ns(ktime_get_real()));
        }
+out:
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 }
index 63c807c9b21c0f7d68f330bab3be65f2d806f173..ebf9d4a42fdde33a2cd977fd6892f96ffec9b51b 100644 (file)
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
 
 static int igbvf_tso(struct igbvf_adapter *adapter,
                      struct igbvf_ring *tx_ring,
-                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+                    struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
+                    __be16 protocol)
 {
        struct e1000_adv_tx_context_desc *context_desc;
        struct igbvf_buffer *buffer_info;
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
        l4len = tcp_hdrlen(skb);
        *hdr_len += l4len;
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
 
-       if (skb->protocol == htons(ETH_P_IP))
+       if (protocol == htons(ETH_P_IP))
                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
        tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
 
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
 
 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                                  struct igbvf_ring *tx_ring,
-                                 struct sk_buff *skb, u32 tx_flags)
+                                struct sk_buff *skb, u32 tx_flags,
+                                __be16 protocol)
 {
        struct e1000_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       switch (skb->protocol) {
+                       switch (protocol) {
                        case htons(ETH_P_IP):
                                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
        u8 hdr_len = 0;
        int count = 0;
        int tso = 0;
+       __be16 protocol = vlan_get_protocol(skb);
 
        if (test_bit(__IGBVF_DOWN, &adapter->state)) {
                dev_kfree_skb_any(skb);
@@ -2234,18 +2237,19 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= IGBVF_TX_FLAGS_VLAN;
-               tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= (skb_vlan_tag_get(skb) <<
+                            IGBVF_TX_FLAGS_VLAN_SHIFT);
        }
 
-       if (skb->protocol == htons(ETH_P_IP))
+       if (protocol == htons(ETH_P_IP))
                tx_flags |= IGBVF_TX_FLAGS_IPV4;
 
        first = tx_ring->next_to_use;
 
        tso = skb_is_gso(skb) ?
-               igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
+               igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
        if (unlikely(tso < 0)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -2253,7 +2257,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
 
        if (tso)
                tx_flags |= IGBVF_TX_FLAGS_TSO;
-       else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+       else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGBVF_TX_FLAGS_CSUM;
 
index aa87605b144a23a98d3ed7121c035c8703ad4627..11a1bdbe3fd9dcdd7e3da5094e34c83d1c151b56 100644 (file)
@@ -1532,9 +1532,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                      DESC_NEEDED)))
                return NETDEV_TX_BUSY;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                tx_flags |= IXGB_TX_FLAGS_VLAN;
-               vlan_id = vlan_tx_tag_get(skb);
+               vlan_id = skb_vlan_tag_get(skb);
        }
 
        first = adapter->tx_ring.next_to_use;
index 38fc64cf5dca0de5cf4e7e47d1c16b0d51aad202..7dcbbec09a705153d1d385ed789db21f65da840d 100644 (file)
@@ -76,6 +76,8 @@
 #define IXGBE_MAX_RXD                     4096
 #define IXGBE_MIN_RXD                       64
 
+#define IXGBE_ETH_P_LLDP                0x88CC
+
 /* flow control */
 #define IXGBE_MIN_FCRTL                           0x40
 #define IXGBE_MAX_FCRTL                        0x7FF80
@@ -753,6 +755,7 @@ struct ixgbe_adapter {
        u32 timer_event_accumulator;
        u32 vferr_refcount;
        struct ixgbe_mac_addr *mac_table;
+       u16 vxlan_port;
        struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
        struct hwmon_buff *ixgbe_hwmon_buff;
index 2ed2c7de230444f88c3f06451d7cc8a7167f5f05..70cc4c5c0a0130e82b44bd2aa597aca401e670de 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/if_bridge.h>
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
+#include <net/vxlan.h>
 
 #ifdef CONFIG_OF
 #include <linux/of_net.h>
@@ -1396,12 +1397,23 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
 {
+       __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+       __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+       bool encap_pkt = false;
+
        skb_checksum_none_assert(skb);
 
        /* Rx csum disabled */
        if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;
 
+       if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
+           (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
+               encap_pkt = true;
+               skb->encapsulation = 1;
+               skb->ip_summed = CHECKSUM_NONE;
+       }
+
        /* if IP and error */
        if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
            ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
@@ -1413,8 +1425,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
                return;
 
        if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
-               __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-
                /*
                 * 82599 errata, UDP frames with a 0 checksum can be marked as
                 * checksum errors.
@@ -1429,6 +1439,17 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 
        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
+       if (encap_pkt) {
+               if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
+                       return;
+
+               if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
+                       ring->rx_stats.csum_err++;
+                       return;
+               }
+               /* If we checked the outer header let the stack know */
+               skb->csum_level = 1;
+       }
 }
 
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
@@ -3564,10 +3585,24 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
        /* Enable MAC Anti-Spoofing */
        hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
                                          adapter->num_vfs);
+
+       /* Ensure LLDP is set for Ethertype Antispoofing if we will be
+        * calling set_ethertype_anti_spoofing for each VF in loop below
+        */
+       if (hw->mac.ops.set_ethertype_anti_spoofing)
+               IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+                               (IXGBE_ETQF_FILTER_EN    | /* enable filter */
+                                IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */
+                                IXGBE_ETH_P_LLDP));       /* LLDP eth type */
+
        /* For VFs that have spoof checking turned off */
        for (i = 0; i < adapter->num_vfs; i++) {
                if (!adapter->vfinfo[i].spoofchk_enabled)
                        ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
+
+               /* enable ethertype anti spoofing if hw supports it */
+               if (hw->mac.ops.set_ethertype_anti_spoofing)
+                       hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
        }
 }
 
@@ -5627,6 +5662,10 @@ static int ixgbe_open(struct net_device *netdev)
 
        ixgbe_up_complete(adapter);
 
+#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
+       vxlan_get_rx_port(netdev);
+
+#endif
        return 0;
 
 err_set_queues:
@@ -7217,8 +7256,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
        first->gso_segs = 1;
 
        /* if we have a HW VLAN tag being added default to the HW one */
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN check the next protocol and store the tag */
        } else if (protocol == htons(ETH_P_8021Q)) {
@@ -7227,11 +7266,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                if (!vhdr)
                        goto out_drop;
 
-               protocol = vhdr->h_vlan_encapsulated_proto;
                tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
                                  IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
        }
+       protocol = vlan_get_protocol(skb);
 
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
            adapter->ptp_clock &&
@@ -7771,6 +7810,64 @@ static int ixgbe_set_features(struct net_device *netdev,
        return 0;
 }
 
+/**
+ * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @dev: The port's netdev
+ * @sa_family: Socket Family that VXLAN is notifiying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+                                __be16 port)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u16 new_port = ntohs(port);
+
+       if (sa_family == AF_INET6)
+               return;
+
+       if (adapter->vxlan_port == new_port) {
+               netdev_info(dev, "Port %d already offloaded\n", new_port);
+               return;
+       }
+
+       if (adapter->vxlan_port) {
+               netdev_info(dev,
+                           "Hit Max num of UDP ports, not adding port %d\n",
+                           new_port);
+               return;
+       }
+
+       adapter->vxlan_port = new_port;
+       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
+}
+
+/**
+ * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @dev: The port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+                                __be16 port)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u16 new_port = ntohs(port);
+
+       if (sa_family == AF_INET6)
+               return;
+
+       if (adapter->vxlan_port != new_port) {
+               netdev_info(dev, "Port %d was not found, not deleting\n",
+                           new_port);
+               return;
+       }
+
+       adapter->vxlan_port = 0;
+       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
+}
+
 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                             struct net_device *dev,
                             const unsigned char *addr, u16 vid,
@@ -7786,7 +7883,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 }
 
 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
-                                   struct nlmsghdr *nlh)
+                                   struct nlmsghdr *nlh, u16 flags)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct nlattr *attr, *br_spec;
@@ -7982,6 +8079,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
        .ndo_dfwd_add_station   = ixgbe_fwd_add,
        .ndo_dfwd_del_station   = ixgbe_fwd_del,
+       .ndo_add_vxlan_port     = ixgbe_add_vxlan_port,
+       .ndo_del_vxlan_port     = ixgbe_del_vxlan_port,
 };
 
 /**
@@ -8339,6 +8438,15 @@ skip_sriov:
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->priv_flags |= IFF_SUPP_NOFCS;
 
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               netdev->hw_enc_features |= NETIF_F_RXCSUM;
+               break;
+       default:
+               break;
+       }
+
 #ifdef CONFIG_IXGBE_DCB
        netdev->dcbnl_ops = &dcbnl_ops;
 #endif
index c76ba90ecc6ecc44a6899fa487eda911b5a52d5a..7f37fe7269a7360c0edbccfab6f8e7e03ff5a3b0 100644 (file)
@@ -101,9 +101,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
                        adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
                }
 
-               /* We do not support RSS w/ SR-IOV */
-               adapter->ring_feature[RING_F_RSS].limit = 1;
-
                /* Disable RSC when in SR-IOV mode */
                adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
                                     IXGBE_FLAG2_RSC_ENABLED);
@@ -1097,14 +1094,12 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
                                  u16 vlan, u8 qos)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int err = 0;
+       int err;
 
-       if (adapter->vfinfo[vf].pf_vlan)
-               err = ixgbe_set_vf_vlan(adapter, false,
-                                       adapter->vfinfo[vf].pf_vlan,
-                                       vf);
+       err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
        if (err)
                goto out;
+
        ixgbe_set_vmvir(adapter, vlan, qos, vf);
        ixgbe_set_vmolr(hw, vf, false);
        if (adapter->vfinfo[vf].spoofchk_enabled)
@@ -1143,6 +1138,11 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
        hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
        if (adapter->vfinfo[vf].vlan_count)
                adapter->vfinfo[vf].vlan_count--;
+
+       /* disable hide VLAN on X550 */
+       if (hw->mac.type >= ixgbe_mac_X550)
+               ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
+
        adapter->vfinfo[vf].pf_vlan = 0;
        adapter->vfinfo[vf].pf_qos = 0;
 
index d101b25dc4b6fdc88abff20fc761ba56a3126385..fc5ecee56ca8f18dd3fbd77bf68101087da0aeec 100644 (file)
@@ -378,6 +378,8 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_SPOOF_MACAS_MASK          0xFF
 #define IXGBE_SPOOF_VLANAS_MASK         0xFF00
 #define IXGBE_SPOOF_VLANAS_SHIFT        8
+#define IXGBE_SPOOF_ETHERTYPEAS                0xFF000000
+#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT  16
 #define IXGBE_PFVFSPOOF_REG_COUNT       8
 
 #define IXGBE_DCA_TXCTRL(_i)    (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
@@ -399,6 +401,7 @@ struct ixgbe_thermal_sensor_data {
 
 #define IXGBE_WUPL      0x05900
 #define IXGBE_WUPM      0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_VXLANCTRL        0x0000507C /* Rx filter VXLAN UDPPORT Register */
 #define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
 #define IXGBE_FHFT_EXT(_n)     (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
                                                            * Filter Table */
@@ -1540,6 +1543,7 @@ enum {
 #define IXGBE_MAX_ETQF_FILTERS  8
 #define IXGBE_ETQF_FCOE         0x08000000 /* bit 27 */
 #define IXGBE_ETQF_BCN          0x10000000 /* bit 28 */
+#define IXGBE_ETQF_TX_ANTISPOOF        0x20000000 /* bit 29 */
 #define IXGBE_ETQF_1588         0x40000000 /* bit 30 */
 #define IXGBE_ETQF_FILTER_EN    0x80000000 /* bit 31 */
 #define IXGBE_ETQF_POOL_ENABLE   (1 << 26) /* bit 26 */
@@ -1565,6 +1569,9 @@ enum {
 #define IXGBE_ETQF_FILTER_FCOE           2
 #define IXGBE_ETQF_FILTER_1588           3
 #define IXGBE_ETQF_FILTER_FIP            4
+#define IXGBE_ETQF_FILTER_LLDP          5
+#define IXGBE_ETQF_FILTER_LACP          6
+
 /* VLAN Control Bit Masks */
 #define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
 #define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */
@@ -2122,6 +2129,7 @@ enum {
 #define IXGBE_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
 #define IXGBE_RXD_STAT_PIF      0x80    /* passed in-exact filter */
 #define IXGBE_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_OUTERIPCS  0x100 /* Cloud IP xsum calculated */
 #define IXGBE_RXD_STAT_VEXT     0x200   /* 1st VLAN found */
 #define IXGBE_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
 #define IXGBE_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
@@ -2139,6 +2147,7 @@ enum {
 #define IXGBE_RXD_ERR_IPE       0x80    /* IP Checksum Error */
 #define IXGBE_RXDADV_ERR_MASK           0xfff00000 /* RDESC.ERRORS mask */
 #define IXGBE_RXDADV_ERR_SHIFT          20         /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_OUTERIPER     0x04000000 /* CRC IP Header error */
 #define IXGBE_RXDADV_ERR_FCEOFE         0x80000000 /* FCoEFe/IPE */
 #define IXGBE_RXDADV_ERR_FCERR          0x00700000 /* FCERR/FDIRERR */
 #define IXGBE_RXDADV_ERR_FDIR_LEN       0x00100000 /* FDIR Length error */
@@ -2227,6 +2236,8 @@ enum {
 #define IXGBE_RXDADV_PKTTYPE_UDP        0x00000200 /* UDP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_SCTP       0x00000400 /* SCTP hdr present */
 #define IXGBE_RXDADV_PKTTYPE_NFS        0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_VXLAN     0x00000800 /* VXLAN hdr present */
+#define IXGBE_RXDADV_PKTTYPE_TUNNEL    0x00010000 /* Tunnel type */
 #define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP  0x00001000 /* IPSec ESP */
 #define IXGBE_RXDADV_PKTTYPE_IPSEC_AH   0x00002000 /* IPSec AH */
 #define IXGBE_RXDADV_PKTTYPE_LINKSEC    0x00004000 /* LinkSec Encap */
@@ -3056,6 +3067,7 @@ struct ixgbe_mac_operations {
        s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
        s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
        s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+       void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
 
        /* DMA Coalescing */
        s32 (*dmac_config)(struct ixgbe_hw *hw);
index ba54ff07b438cd1e42c33768f060334b7f310834..49395420c9b35ff2762d6dd584b9783146a8ee49 100644 (file)
@@ -55,9 +55,6 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
 
-       /* Call PHY identify routine to get the phy type */
-       ixgbe_identify_phy_generic(hw);
-
        mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
        mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
        mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
index ffdd1231f419a5e422b6d1bbc7b111a87650669f..50bf81908dd6839bc96f84a7e34e2e1c288af928 100644 (file)
@@ -80,7 +80,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
  *  ixgbe_hw struct in order to set up EEPROM access.
  **/
-s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
 {
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
        u32 eec;
@@ -110,8 +110,8 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
  *  @device_type: 3 bit device type
  *  @phy_data: Pointer to read data from the register
  **/
-s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-                               u32 device_type, u32 *data)
+static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+                                      u32 device_type, u32 *data)
 {
        u32 i, command, error;
 
@@ -158,7 +158,8 @@ s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
  *
  *  Reads a 16 bit word from the EEPROM using the hostif.
  **/
-s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+                                         u16 *data)
 {
        s32 status;
        struct ixgbe_hic_read_shadow_ram buffer;
@@ -193,8 +194,8 @@ s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
  *
  *  Reads a 16 bit word(s) from the EEPROM using the hostif.
  **/
-s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
-                                    u16 offset, u16 words, u16 *data)
+static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+                                           u16 offset, u16 words, u16 *data)
 {
        struct ixgbe_hic_read_shadow_ram buffer;
        u32 current_word = 0;
@@ -331,7 +332,8 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
  *
  *  Returns a negative error code on error, or the 16-bit checksum
  **/
-s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+                                   u32 buffer_size)
 {
        u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
        u16 *local_buffer;
@@ -407,7 +409,7 @@ s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
  *
  *  Returns a negative error code on error, or the 16-bit checksum
  **/
-s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
 {
        return ixgbe_calc_checksum_X550(hw, NULL, 0);
 }
@@ -419,7 +421,7 @@ s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
  *
  *   Reads a 16 bit word from the EEPROM using the hostif.
  **/
-s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
        s32 status = 0;
 
@@ -440,7 +442,8 @@ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
  *  Performs checksum calculation and validates the EEPROM checksum.  If the
  *  caller does not need checksum_val, the value can be NULL.
  **/
-s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+                                              u16 *checksum_val)
 {
        s32 status;
        u16 checksum;
@@ -489,7 +492,8 @@ s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
  *
  *  Write a 16 bit word to the EEPROM using the hostif.
  **/
-s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+                                          u16 data)
 {
        s32 status;
        struct ixgbe_hic_write_shadow_ram buffer;
@@ -517,7 +521,7 @@ s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
  *
  *  Write a 16 bit word to the EEPROM using the hostif.
  **/
-s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
 {
        s32 status = 0;
 
@@ -537,7 +541,7 @@ s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
  *
  *  Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
  **/
-s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
 {
        s32 status = 0;
        union ixgbe_hic_hdr2 buffer;
@@ -560,7 +564,7 @@ s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
  *  checksum and updates the EEPROM and instructs the hardware to update
  *  the flash.
  **/
-s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
 {
        s32 status;
        u16 checksum = 0;
@@ -600,8 +604,9 @@ s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
  *
  *  Write a 16 bit word(s) to the EEPROM using the hostif.
  **/
-s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
-                                     u16 offset, u16 words, u16 *data)
+static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+                                            u16 offset, u16 words,
+                                            u16 *data)
 {
        s32 status = 0;
        u32 i = 0;
@@ -630,7 +635,7 @@ s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
 /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
  *  @hw: pointer to hardware structure
  **/
-void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
 
@@ -647,7 +652,7 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
 /** ixgbe_setup_sfp_modules_X550em - Setup SFP module
  * @hw: pointer to hardware structure
  */
-s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
 {
        bool setup_linear;
        u16 reg_slice, edc_mode;
@@ -703,9 +708,9 @@ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
  * @speed: pointer to link speed
  * @autoneg: true when autoneg or autotry is enabled
  **/
-s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed,
-                                      bool *autoneg)
+static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg)
 {
        /* SFP */
        if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -740,8 +745,8 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
  *  @device_type: 3 bit device type
  *  @data: Data to write to the register
  **/
-s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-                                u32 device_type, u32 data)
+static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+                                       u32 device_type, u32 data)
 {
        u32 i, command, error;
 
@@ -904,7 +909,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
  *
  *   Configures the integrated KX4 PHY.
  **/
-s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
 {
        s32 status;
        u32 reg_val;
@@ -942,7 +947,7 @@ s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
  *
  *   Configures the integrated KR PHY.
  **/
-s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
 {
        s32 status;
        u32 reg_val;
@@ -987,7 +992,7 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
  *  A return of a non-zero value indicates an error, and the base driver should
  *  not report link up.
  **/
-s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
 {
        u32 status;
        u16 lasi, autoneg_status, speed;
@@ -1049,7 +1054,7 @@ s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
  *  set during init_shared_code because the PHY/SFP type was
  *  not known.  Perform the SFP init if necessary.
  **/
-s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
 {
        struct ixgbe_phy_info *phy = &hw->phy;
        s32 ret_val;
@@ -1102,7 +1107,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
  *  Returns the media type (fiber, copper, backplane)
  *
  */
-enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
 {
        enum ixgbe_media_type media_type;
 
@@ -1129,7 +1134,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
 /** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
  ** @hw: pointer to hardware structure
  **/
-s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
 {
        u32 status;
        u16 reg;
@@ -1202,7 +1207,7 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
  **  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
  **  reset.
  **/
-s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
 {
        ixgbe_link_speed link_speed;
        s32 status;
@@ -1295,6 +1300,28 @@ mac_reset_top:
        return status;
 }
 
+/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
+ *     anti-spoofing
+ *  @hw:  pointer to hardware structure
+ *  @enable: enable or disable switch for Ethertype anti-spoofing
+ *  @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ **/
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
+                                           int vf)
+{
+       int vf_target_reg = vf >> 3;
+       int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
+       u32 pfvfspoof;
+
+       pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+       if (enable)
+               pfvfspoof |= (1 << vf_target_shift);
+       else
+               pfvfspoof &= ~(1 << vf_target_shift);
+
+       IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
 #define X550_COMMON_MAC \
        .init_hw                        = &ixgbe_init_hw_generic, \
        .start_hw                       = &ixgbe_start_hw_X540, \
@@ -1329,6 +1356,8 @@ mac_reset_top:
        .init_uta_tables                = &ixgbe_init_uta_tables_generic, \
        .set_mac_anti_spoofing          = &ixgbe_set_mac_anti_spoofing, \
        .set_vlan_anti_spoofing         = &ixgbe_set_vlan_anti_spoofing, \
+       .set_ethertype_anti_spoofing    = \
+                               &ixgbe_set_ethertype_anti_spoofing_X550, \
        .acquire_swfw_sync              = &ixgbe_acquire_swfw_sync_X540, \
        .release_swfw_sync              = &ixgbe_release_swfw_sync_X540, \
        .disable_rx_buff                = &ixgbe_disable_rx_buff_generic, \
@@ -1345,7 +1374,6 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
        .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
        .get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
        .setup_link             = &ixgbe_setup_mac_link_X540,
-       .set_rxpba              = &ixgbe_set_rxpba_generic,
        .get_link_capabilities  = &ixgbe_get_copper_link_capabilities_generic,
        .setup_sfp              = NULL,
 };
index 8c44ab25f3fa7100b3c0c351c75b0733b4c6bcd6..3a9b356dff014b3be96d3fba3e42d52a91d042be 100644 (file)
 #define BP_EXTENDED_STATS
 #endif
 
+#define IXGBE_MAX_TXD_PWR      14
+#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
@@ -85,6 +92,18 @@ struct ixgbevf_rx_queue_stats {
        u64 csum_err;
 };
 
+enum ixgbevf_ring_state_t {
+       __IXGBEVF_TX_DETECT_HANG,
+       __IXGBEVF_HANG_CHECK_ARMED,
+};
+
+#define check_for_tx_hang(ring) \
+       test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+       set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+       clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+
 struct ixgbevf_ring {
        struct ixgbevf_ring *next;
        struct net_device *netdev;
@@ -101,7 +120,7 @@ struct ixgbevf_ring {
                struct ixgbevf_tx_buffer *tx_buffer_info;
                struct ixgbevf_rx_buffer *rx_buffer_info;
        };
-
+       unsigned long state;
        struct ixgbevf_stats stats;
        struct u64_stats_sync syncp;
        union {
@@ -124,6 +143,7 @@ struct ixgbevf_ring {
 
 #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
 #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
+#define IXGBEVF_MAX_RSS_QUEUES 2
 
 #define IXGBEVF_DEFAULT_TXD   1024
 #define IXGBEVF_DEFAULT_RXD   512
@@ -347,8 +367,6 @@ struct ixgbevf_adapter {
        /* this field must be first, see ixgbevf_process_skb_fields */
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 
-       struct timer_list watchdog_timer;
-       struct work_struct reset_task;
        struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
 
        /* Interrupt Throttle Rate */
@@ -378,8 +396,7 @@ struct ixgbevf_adapter {
         * thus the additional *_CAPABLE flags.
         */
        u32 flags;
-#define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1)
-
+#define IXGBEVF_FLAG_RESET_REQUESTED           (u32)(1)
 #define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED     (u32)(1 << 2)
 
        struct msix_entry *msix_entries;
@@ -415,9 +432,11 @@ struct ixgbevf_adapter {
        u32 link_speed;
        bool link_up;
 
-       spinlock_t mbx_lock;
+       struct timer_list service_timer;
+       struct work_struct service_task;
 
-       struct work_struct watchdog_task;
+       spinlock_t mbx_lock;
+       unsigned long last_reset;
 };
 
 enum ixbgevf_state_t {
@@ -426,7 +445,8 @@ enum ixbgevf_state_t {
        __IXGBEVF_DOWN,
        __IXGBEVF_DISABLED,
        __IXGBEVF_REMOVING,
-       __IXGBEVF_WORK_INIT,
+       __IXGBEVF_SERVICE_SCHED,
+       __IXGBEVF_SERVICE_INITED,
 };
 
 enum ixgbevf_boards {
index 62a0d8e0f17da5ce75d48f9ff39fca5354c4447f..4186981e562d0c395825785aa07f32d15fab324f 100644 (file)
@@ -98,6 +98,23 @@ static int debug = -1;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
+static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
+{
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+           !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
+           !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
+               schedule_work(&adapter->service_task);
+}
+
+static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
+{
+       BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
+
+       /* flush memory to make sure state is correct before next watchdog */
+       smp_mb__before_atomic();
+       clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
+}
+
 /* forward decls */
 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
@@ -111,8 +128,8 @@ static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
                return;
        hw->hw_addr = NULL;
        dev_err(&adapter->pdev->dev, "Adapter removed\n");
-       if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
-               schedule_work(&adapter->watchdog_task);
+       if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
+               ixgbevf_service_event_schedule(adapter);
 }
 
 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
@@ -199,14 +216,72 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
        /* tx_buffer must be completely set up in the transmit path */
 }
 
-#define IXGBE_MAX_TXD_PWR      14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
+{
+       return ring->stats.packets;
+}
+
+static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
+{
+       struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
+       u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
+{
+       u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+       u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
+
+       clear_check_for_tx_hang(tx_ring);
+
+       /* Check for a hung queue, but be thorough. This verifies
+        * that a transmit has been completed since the previous
+        * check AND there is at least one packet pending. The
+        * ARMED bit is set to indicate a potential hang.
+        */
+       if ((tx_done_old == tx_done) && tx_pending) {
+               /* make sure it is true for two checks in a row */
+               return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
+                                       &tx_ring->state);
+       }
+       /* reset the countdown */
+       clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
+
+       /* update completed stats and continue */
+       tx_ring->tx_stats.tx_done_old = tx_done;
+
+       return false;
+}
+
+static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
+{
+       /* Do the reset outside of interrupt context */
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+               adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+               ixgbevf_service_event_schedule(adapter);
+       }
+}
 
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-static void ixgbevf_tx_timeout(struct net_device *netdev);
+       ixgbevf_tx_timeout_reset(adapter);
+}
 
 /**
  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
@@ -311,6 +386,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;
 
+       if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
+               struct ixgbe_hw *hw = &adapter->hw;
+               union ixgbe_adv_tx_desc *eop_desc;
+
+               eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
+
+               pr_err("Detected Tx Unit Hang\n"
+                      "  Tx Queue             <%d>\n"
+                      "  TDH, TDT             <%x>, <%x>\n"
+                      "  next_to_use          <%x>\n"
+                      "  next_to_clean        <%x>\n"
+                      "tx_buffer_info[next_to_clean]\n"
+                      "  next_to_watch        <%p>\n"
+                      "  eop_desc->wb.status  <%x>\n"
+                      "  time_stamp           <%lx>\n"
+                      "  jiffies              <%lx>\n",
+                      tx_ring->queue_index,
+                      IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
+                      IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
+                      tx_ring->next_to_use, i,
+                      eop_desc, (eop_desc ? eop_desc->wb.status : 0),
+                      tx_ring->tx_buffer_info[i].time_stamp, jiffies);
+
+               netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+               /* schedule immediate reset if we believe we hung */
+               ixgbevf_tx_timeout_reset(adapter);
+
+               return true;
+       }
+
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -1158,9 +1264,7 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 
        hw->mac.get_link_status = 1;
 
-       if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
-           !test_bit(__IXGBEVF_REMOVING, &adapter->state))
-               mod_timer(&adapter->watchdog_timer, jiffies);
+       ixgbevf_service_event_schedule(adapter);
 
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
@@ -1479,6 +1583,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
        txdctl |= (1 << 8) |    /* HTHRESH = 1 */
                  32;          /* PTHRESH = 32 */
 
+       clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
+
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
 
        /* poll to verify queue is enabled */
@@ -1584,6 +1690,39 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
                       reg_idx);
 }
 
+static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 vfmrqc = 0, vfreta = 0;
+       u32 rss_key[10];
+       u16 rss_i = adapter->num_rx_queues;
+       int i, j;
+
+       /* Fill out hash function seeds */
+       netdev_rss_key_fill(rss_key, sizeof(rss_key));
+       for (i = 0; i < 10; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+
+       /* Fill out redirection table */
+       for (i = 0, j = 0; i < 64; i++, j++) {
+               if (j == rss_i)
+                       j = 0;
+               vfreta = (vfreta << 8) | (j * 0x1);
+               if ((i & 3) == 3)
+                       IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+       }
+
+       /* Perform hash on these packet types */
+       vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
+               IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
+               IXGBE_VFMRQC_RSS_FIELD_IPV6 |
+               IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
+
+       vfmrqc |= IXGBE_VFMRQC_RSSEN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
+}
+
 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
                                      struct ixgbevf_ring *ring)
 {
@@ -1640,6 +1779,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
 
        ixgbevf_setup_psrtype(adapter);
+       if (hw->mac.type >= ixgbe_mac_X550_vf)
+               ixgbevf_setup_vfmrqc(adapter);
 
        /* notify the PF of our intent to use this size of frame */
        ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
@@ -1794,7 +1935,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        unsigned int def_q = 0;
        unsigned int num_tcs = 0;
-       unsigned int num_rx_queues = 1;
+       unsigned int num_rx_queues = adapter->num_rx_queues;
+       unsigned int num_tx_queues = adapter->num_tx_queues;
        int err;
 
        spin_lock_bh(&adapter->mbx_lock);
@@ -1808,6 +1950,9 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
                return err;
 
        if (num_tcs > 1) {
+               /* we need only one Tx queue */
+               num_tx_queues = 1;
+
                /* update default Tx ring register index */
                adapter->tx_ring[0]->reg_idx = def_q;
 
@@ -1816,7 +1961,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
        }
 
        /* if we have a bad config abort request queue reset */
-       if (adapter->num_rx_queues != num_rx_queues) {
+       if ((adapter->num_rx_queues != num_rx_queues) ||
+           (adapter->num_tx_queues != num_tx_queues)) {
                /* force mailbox timeout to prevent further messages */
                hw->mbx.timeout = 0;
 
@@ -1917,6 +2063,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);
 
+       /* clear any pending interrupts, may auto mask */
+       IXGBE_READ_REG(hw, IXGBE_VTEICR);
+       ixgbevf_irq_enable(adapter);
+
        /* enable transmits */
        netif_tx_start_all_queues(netdev);
 
@@ -1924,21 +2074,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
        ixgbevf_init_last_counter_stats(adapter);
 
        hw->mac.get_link_status = 1;
-       mod_timer(&adapter->watchdog_timer, jiffies);
+       mod_timer(&adapter->service_timer, jiffies);
 }
 
 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
-
        ixgbevf_configure(adapter);
 
        ixgbevf_up_complete(adapter);
-
-       /* clear any pending interrupts, may auto mask */
-       IXGBE_READ_REG(hw, IXGBE_VTEICR);
-
-       ixgbevf_irq_enable(adapter);
 }
 
 /**
@@ -2045,22 +2188,19 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
-       netif_tx_disable(netdev);
-
-       msleep(10);
+       usleep_range(10000, 20000);
 
        netif_tx_stop_all_queues(netdev);
 
+       /* call carrier off first to avoid false dev_watchdog timeouts */
+       netif_carrier_off(netdev);
+       netif_tx_disable(netdev);
+
        ixgbevf_irq_disable(adapter);
 
        ixgbevf_napi_disable_all(adapter);
 
-       del_timer_sync(&adapter->watchdog_timer);
-       /* can't call flush scheduled work here because it can deadlock
-        * if linkwatch_event tries to acquire the rtnl_lock which we are
-        * holding */
-       while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
-               msleep(1);
+       del_timer_sync(&adapter->service_timer);
 
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -2070,8 +2210,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
                                IXGBE_TXDCTL_SWFLSH);
        }
 
-       netif_carrier_off(netdev);
-
        if (!pci_channel_offline(adapter->pdev))
                ixgbevf_reset(adapter);
 
@@ -2110,6 +2248,8 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
                memcpy(netdev->perm_addr, adapter->hw.mac.addr,
                       netdev->addr_len);
        }
+
+       adapter->last_reset = jiffies;
 }
 
 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
@@ -2181,8 +2321,19 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
                return;
 
        /* we need as many queues as traffic classes */
-       if (num_tcs > 1)
+       if (num_tcs > 1) {
                adapter->num_rx_queues = num_tcs;
+       } else {
+               u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
+
+               switch (hw->api_version) {
+               case ixgbe_mbox_api_11:
+                       adapter->num_rx_queues = rss;
+                       adapter->num_tx_queues = rss;
+               default:
+                       break;
+               }
+       }
 }
 
 /**
@@ -2552,7 +2703,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
 
-       if (!adapter->link_up)
+       if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+           test_bit(__IXGBEVF_RESETTING, &adapter->state))
                return;
 
        UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
@@ -2576,79 +2728,176 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 }
 
 /**
- * ixgbevf_watchdog - Timer Call-back
+ * ixgbevf_service_timer - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
  **/
-static void ixgbevf_watchdog(unsigned long data)
+static void ixgbevf_service_timer(unsigned long data)
 {
        struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+
+       /* Reset the timer */
+       mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
+
+       ixgbevf_service_event_schedule(adapter);
+}
+
+static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+       if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+               return;
+
+       adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
+
+       /* If we're already down or resetting, just bail */
+       if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+           test_bit(__IXGBEVF_RESETTING, &adapter->state))
+               return;
+
+       adapter->tx_timeout_count++;
+
+       ixgbevf_reinit_locked(adapter);
+}
+
+/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter - pointer to the device adapter structure
+ *
+ * This function serves two purposes.  First it strobes the interrupt lines
+ * in order to make certain interrupts are occurring.  Secondly it sets the
+ * bits needed to check for TX hangs.  As a result we should immediately
+ * determine if a hang has occurred.
+ */
+static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
+{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eics = 0;
        int i;
 
-       /*
-        * Do the watchdog outside of interrupt context due to the lovely
-        * delays that some of the newer hardware requires
-        */
+       /* If we're down or resetting, just bail */
+       if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+           test_bit(__IXGBEVF_RESETTING, &adapter->state))
+               return;
 
-       if (test_bit(__IXGBEVF_DOWN, &adapter->state))
-               goto watchdog_short_circuit;
+       /* Force detection of hung controller */
+       if (netif_carrier_ok(adapter->netdev)) {
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       set_check_for_tx_hang(adapter->tx_ring[i]);
+       }
 
        /* get one bit for every active tx/rx interrupt vector */
        for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+
                if (qv->rx.ring || qv->tx.ring)
                        eics |= 1 << i;
        }
 
+       /* Cause software interrupt to ensure rings are cleaned */
        IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
+}
 
-watchdog_short_circuit:
-       schedule_work(&adapter->watchdog_task);
+/**
+ * ixgbevf_watchdog_update_link - update the link status
+ * @adapter - pointer to the device adapter structure
+ **/
+static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 link_speed = adapter->link_speed;
+       bool link_up = adapter->link_up;
+       s32 err;
+
+       spin_lock_bh(&adapter->mbx_lock);
+
+       err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+       spin_unlock_bh(&adapter->mbx_lock);
+
+       /* if check for link returns error we will need to reset */
+       if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
+               adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+               link_up = false;
+       }
+
+       adapter->link_up = link_up;
+       adapter->link_speed = link_speed;
 }
 
 /**
- * ixgbevf_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
+ * ixgbevf_watchdog_link_is_up - update netif_carrier status and
+ *                              print link up message
+ * @adapter - pointer to the device adapter structure
  **/
-static void ixgbevf_tx_timeout(struct net_device *netdev)
+static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
 {
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+       struct net_device *netdev = adapter->netdev;
 
-       /* Do the reset outside of interrupt context */
-       schedule_work(&adapter->reset_task);
+       /* only continue if link was previously down */
+       if (netif_carrier_ok(netdev))
+               return;
+
+       dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
+                (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+                "10 Gbps" :
+                (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
+                "1 Gbps" :
+                (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
+                "100 Mbps" :
+                "unknown speed");
+
+       netif_carrier_on(netdev);
 }
 
-static void ixgbevf_reset_task(struct work_struct *work)
+/**
+ * ixgbevf_watchdog_link_is_down - update netif_carrier status and
+ *                                print link down message
+ * @adapter - pointer to the adapter structure
+ **/
+static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
 {
-       struct ixgbevf_adapter *adapter;
-       adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+       struct net_device *netdev = adapter->netdev;
 
-       /* If we're already down or resetting, just bail */
+       adapter->link_speed = 0;
+
+       /* only continue if link was up previously */
+       if (!netif_carrier_ok(netdev))
+               return;
+
+       dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+
+       netif_carrier_off(netdev);
+}
+
+/**
+ * ixgbevf_watchdog_subtask - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
+{
+       /* if interface is down do nothing */
        if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
-           test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
            test_bit(__IXGBEVF_RESETTING, &adapter->state))
                return;
 
-       adapter->tx_timeout_count++;
+       ixgbevf_watchdog_update_link(adapter);
 
-       ixgbevf_reinit_locked(adapter);
+       if (adapter->link_up)
+               ixgbevf_watchdog_link_is_up(adapter);
+       else
+               ixgbevf_watchdog_link_is_down(adapter);
+
+       ixgbevf_update_stats(adapter);
 }
 
 /**
- * ixgbevf_watchdog_task - worker thread to bring link up
+ * ixgbevf_service_task - manages and runs subtasks
  * @work: pointer to work_struct containing our data
  **/
-static void ixgbevf_watchdog_task(struct work_struct *work)
+static void ixgbevf_service_task(struct work_struct *work)
 {
        struct ixgbevf_adapter *adapter = container_of(work,
                                                       struct ixgbevf_adapter,
-                                                      watchdog_task);
-       struct net_device *netdev = adapter->netdev;
+                                                      service_task);
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 link_speed = adapter->link_speed;
-       bool link_up = adapter->link_up;
-       s32 need_reset;
 
        if (IXGBE_REMOVED(hw->hw_addr)) {
                if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
@@ -2658,73 +2907,13 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
                }
                return;
        }
-       ixgbevf_queue_reset_subtask(adapter);
-
-       adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
-
-       /*
-        * Always check the link on the watchdog because we have
-        * no LSC interrupt
-        */
-       spin_lock_bh(&adapter->mbx_lock);
-
-       need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
-
-       spin_unlock_bh(&adapter->mbx_lock);
-
-       if (need_reset) {
-               adapter->link_up = link_up;
-               adapter->link_speed = link_speed;
-               netif_carrier_off(netdev);
-               netif_tx_stop_all_queues(netdev);
-               schedule_work(&adapter->reset_task);
-               goto pf_has_reset;
-       }
-       adapter->link_up = link_up;
-       adapter->link_speed = link_speed;
-
-       if (link_up) {
-               if (!netif_carrier_ok(netdev)) {
-                       char *link_speed_string;
-                       switch (link_speed) {
-                       case IXGBE_LINK_SPEED_10GB_FULL:
-                               link_speed_string = "10 Gbps";
-                               break;
-                       case IXGBE_LINK_SPEED_1GB_FULL:
-                               link_speed_string = "1 Gbps";
-                               break;
-                       case IXGBE_LINK_SPEED_100_FULL:
-                               link_speed_string = "100 Mbps";
-                               break;
-                       default:
-                               link_speed_string = "unknown speed";
-                               break;
-                       }
-                       dev_info(&adapter->pdev->dev,
-                               "NIC Link is Up, %s\n", link_speed_string);
-                       netif_carrier_on(netdev);
-                       netif_tx_wake_all_queues(netdev);
-               }
-       } else {
-               adapter->link_up = false;
-               adapter->link_speed = 0;
-               if (netif_carrier_ok(netdev)) {
-                       dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
-                       netif_carrier_off(netdev);
-                       netif_tx_stop_all_queues(netdev);
-               }
-       }
 
-       ixgbevf_update_stats(adapter);
-
-pf_has_reset:
-       /* Reset the timer */
-       if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
-           !test_bit(__IXGBEVF_REMOVING, &adapter->state))
-               mod_timer(&adapter->watchdog_timer,
-                         round_jiffies(jiffies + (2 * HZ)));
+       ixgbevf_queue_reset_subtask(adapter);
+       ixgbevf_reset_subtask(adapter);
+       ixgbevf_watchdog_subtask(adapter);
+       ixgbevf_check_hang_subtask(adapter);
 
-       adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+       ixgbevf_service_event_complete(adapter);
 }
 
 /**
@@ -2944,10 +3133,6 @@ static int ixgbevf_open(struct net_device *netdev)
        if (!adapter->num_msix_vectors)
                return -ENOMEM;
 
-       /* disallow open during test */
-       if (test_bit(__IXGBEVF_TESTING, &adapter->state))
-               return -EBUSY;
-
        if (hw->adapter_stopped) {
                ixgbevf_reset(adapter);
                /* if adapter is still stopped then PF isn't up and
@@ -2960,6 +3145,12 @@ static int ixgbevf_open(struct net_device *netdev)
                }
        }
 
+       /* disallow open during test */
+       if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+               return -EBUSY;
+
+       netif_carrier_off(netdev);
+
        /* allocate transmit descriptors */
        err = ixgbevf_setup_all_tx_resources(adapter);
        if (err)
@@ -2979,15 +3170,11 @@ static int ixgbevf_open(struct net_device *netdev)
         */
        ixgbevf_map_rings_to_vectors(adapter);
 
-       ixgbevf_up_complete(adapter);
-
-       /* clear any pending interrupts, may auto mask */
-       IXGBE_READ_REG(hw, IXGBE_VTEICR);
        err = ixgbevf_request_irq(adapter);
        if (err)
                goto err_req_irq;
 
-       ixgbevf_irq_enable(adapter);
+       ixgbevf_up_complete(adapter);
 
        return 0;
 
@@ -3099,7 +3286,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (first->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -3156,7 +3343,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 l4_hdr = 0;
-               switch (skb->protocol) {
+               switch (first->protocol) {
                case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -3452,8 +3639,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        first->bytecount = skb->len;
        first->gso_segs = 1;
 
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb);
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
@@ -3822,28 +4009,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                           NETIF_F_HW_VLAN_CTAG_RX |
                           NETIF_F_HW_VLAN_CTAG_FILTER;
 
-       netdev->vlan_features |= NETIF_F_TSO;
-       netdev->vlan_features |= NETIF_F_TSO6;
-       netdev->vlan_features |= NETIF_F_IP_CSUM;
-       netdev->vlan_features |= NETIF_F_IPV6_CSUM;
-       netdev->vlan_features |= NETIF_F_SG;
+       netdev->vlan_features |= NETIF_F_TSO |
+                                NETIF_F_TSO6 |
+                                NETIF_F_IP_CSUM |
+                                NETIF_F_IPV6_CSUM |
+                                NETIF_F_SG;
 
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
        netdev->priv_flags |= IFF_UNICAST_FLT;
 
-       init_timer(&adapter->watchdog_timer);
-       adapter->watchdog_timer.function = ixgbevf_watchdog;
-       adapter->watchdog_timer.data = (unsigned long)adapter;
-
        if (IXGBE_REMOVED(hw->hw_addr)) {
                err = -EIO;
                goto err_sw_init;
        }
-       INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
-       INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
-       set_bit(__IXGBEVF_WORK_INIT, &adapter->state);
+
+       setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
+                   (unsigned long)adapter);
+
+       INIT_WORK(&adapter->service_task, ixgbevf_service_task);
+       set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
+       clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
 
        err = ixgbevf_init_interrupt_scheme(adapter);
        if (err)
@@ -3917,11 +4104,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
        adapter = netdev_priv(netdev);
 
        set_bit(__IXGBEVF_REMOVING, &adapter->state);
-
-       del_timer_sync(&adapter->watchdog_timer);
-
-       cancel_work_sync(&adapter->reset_task);
-       cancel_work_sync(&adapter->watchdog_task);
+       cancel_work_sync(&adapter->service_task);
 
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
@@ -3955,7 +4138,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-       if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
+       if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
                return PCI_ERS_RESULT_DISCONNECT;
 
        rtnl_lock();
index 09dd8f698beacc4b91979f149354352b586ffe74..3e712fd6e695e5b83ec1a45951968548314f63bf 100644 (file)
 #define IXGBE_VFGOTC_LSB       0x02020
 #define IXGBE_VFGOTC_MSB       0x02024
 #define IXGBE_VFMPRC           0x01034
+#define IXGBE_VFMRQC           0x3000
+#define IXGBE_VFRSSRK(x)       (0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x)        (0x3200 + ((x) * 4))
+
+/* VFMRQC bits */
+#define IXGBE_VFMRQC_RSSEN              0x00000001  /* RSS Enable */
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV4     0x00020000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6     0x00100000
+#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000
 
 #define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
 
index 44ce7d88f554638035090cf165f0ffdbf1b2902f..6e9a792097d315891af8c36812e38fce15e71640 100644 (file)
@@ -2154,9 +2154,9 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
 static inline void
 jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
 {
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                *flags |= TXFLAG_TAGON;
-               *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+               *vlan = cpu_to_le16(skb_vlan_tag_get(skb));
        }
 }
 
index a62fc38f045e1b802730c95484c0b60a340ec1c6..1c75829eb1668fe094af3a9049fb53bb0c8b4bb5 100644 (file)
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_dma) && \
         (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
 /*
  * RX/TX descriptors.
  */
@@ -362,6 +366,7 @@ struct tx_queue {
        dma_addr_t tso_hdrs_dma;
 
        struct tx_desc *tx_desc_area;
+       char *tx_desc_mapping; /* array to track the type of the dma mapping */
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;
 
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
        desc = &txq->tx_desc_area[tx_index];
+       txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
        desc->l4i_chk = 0;
        desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
                skb_frag_t *this_frag;
                int tx_index;
                struct tx_desc *desc;
-               void *addr;
 
                this_frag = &skb_shinfo(skb)->frags[frag];
-               addr = page_address(this_frag->page.p) + this_frag->page_offset;
                tx_index = txq->tx_curr_desc++;
                if (txq->tx_curr_desc == txq->tx_ring_size)
                        txq->tx_curr_desc = 0;
                desc = &txq->tx_desc_area[tx_index];
+               txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
 
                /*
                 * The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
                desc->l4i_chk = 0;
                desc->byte_cnt = skb_frag_size(this_frag);
-               desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
-                                              desc->byte_cnt, DMA_TO_DEVICE);
+               desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+                                                this_frag, 0, desc->byte_cnt,
+                                                DMA_TO_DEVICE);
        }
 }
 
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
        desc = &txq->tx_desc_area[tx_index];
+       txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
        if (nr_frags) {
                txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                int tx_index;
                struct tx_desc *desc;
                u32 cmd_sts;
+               char desc_dma_map;
 
                tx_index = txq->tx_used_desc;
                desc = &txq->tx_desc_area[tx_index];
+               desc_dma_map = txq->tx_desc_mapping[tx_index];
+
                cmd_sts = desc->cmd_sts;
 
                if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                reclaimed++;
                txq->tx_desc_count--;
 
-               if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-                       dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-                                        desc->byte_cnt, DMA_TO_DEVICE);
+               if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+                       if (desc_dma_map == DESC_DMA_MAP_PAGE)
+                               dma_unmap_page(mp->dev->dev.parent,
+                                              desc->buf_ptr,
+                                              desc->byte_cnt,
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(mp->dev->dev.parent,
+                                                desc->buf_ptr,
+                                                desc->byte_cnt,
+                                                DMA_TO_DEVICE);
+               }
 
                if (cmd_sts & TX_ENABLE_INTERRUPT) {
                        struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
        struct tx_queue *txq = mp->txq + index;
        struct tx_desc *tx_desc;
        int size;
+       int ret;
        int i;
 
        txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
                                        nexti * sizeof(struct tx_desc);
        }
 
+       txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+                                      GFP_KERNEL);
+       if (!txq->tx_desc_mapping) {
+               ret = -ENOMEM;
+               goto err_free_desc_area;
+       }
+
        /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
        txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
                                           txq->tx_ring_size * TSO_HEADER_SIZE,
                                           &txq->tso_hdrs_dma, GFP_KERNEL);
        if (txq->tso_hdrs == NULL) {
-               dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
-                                 txq->tx_desc_area, txq->tx_desc_dma);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_free_desc_mapping;
        }
        skb_queue_head_init(&txq->tx_skb);
 
        return 0;
+
+err_free_desc_mapping:
+       kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+       if (index == 0 && size <= mp->tx_desc_sram_size)
+               iounmap(txq->tx_desc_area);
+       else
+               dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+                                 txq->tx_desc_area, txq->tx_desc_dma);
+       return ret;
 }
 
 static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
        else
                dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
                                  txq->tx_desc_area, txq->tx_desc_dma);
+       kfree(txq->tx_desc_mapping);
+
        if (txq->tso_hdrs)
                dma_free_coherent(mp->dev->dev.parent,
                                  txq->tx_ring_size * TSO_HEADER_SIZE,
index 867a6a3ef81f72c4c9da17ea861ab1bf30628833..d9f4498832a10b400e31b461e3b821d1fe56f714 100644 (file)
@@ -1895,14 +1895,14 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
        ctrl = 0;
 
        /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                if (!le) {
                        le = get_tx_le(sky2, &slot);
                        le->addr = 0;
                        le->opcode = OP_VLAN|HW_OWNER;
                } else
                        le->opcode |= OP_VLAN;
-               le->length = cpu_to_be16(vlan_tx_tag_get(skb));
+               le->length = cpu_to_be16(skb_vlan_tag_get(skb));
                ctrl |= INS_VLAN;
        }
 
@@ -2594,7 +2594,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
        sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
        prefetch(sky2->rx_ring + sky2->rx_next);
 
-       if (vlan_tx_tag_present(re->skb))
+       if (skb_vlan_tag_present(re->skb))
                count -= VLAN_HLEN;     /* Account for vlan tag */
 
        /* This chip has hardware problems that generates bogus status.
index 963dd7e6d54780bc21233a016e6bed0ef48866da..a716c26e0d99cce1d62a6a1e6472bc43bcc6c2ee 100644 (file)
@@ -592,7 +592,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                buf->nbufs        = 1;
                buf->npages       = 1;
                buf->page_shift   = get_order(size) + PAGE_SHIFT;
-               buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
+               buf->direct.buf   = dma_alloc_coherent(&dev->persist->pdev->dev,
                                                       size, &t, gfp);
                if (!buf->direct.buf)
                        return -ENOMEM;
@@ -619,7 +619,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 
                for (i = 0; i < buf->nbufs; ++i) {
                        buf->page_list[i].buf =
-                               dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+                               dma_alloc_coherent(&dev->persist->pdev->dev,
+                                                  PAGE_SIZE,
                                                   &t, gfp);
                        if (!buf->page_list[i].buf)
                                goto err_free;
@@ -657,7 +658,8 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
        int i;
 
        if (buf->nbufs == 1)
-               dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+               dma_free_coherent(&dev->persist->pdev->dev, size,
+                                 buf->direct.buf,
                                  buf->direct.map);
        else {
                if (BITS_PER_LONG == 64 && buf->direct.buf)
@@ -665,7 +667,8 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 
                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
-                               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                               dma_free_coherent(&dev->persist->pdev->dev,
+                                                 PAGE_SIZE,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
@@ -738,7 +741,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
                if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
                        goto out;
 
-       pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp);
+       pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
@@ -775,7 +778,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
        set_bit(i, db->u.pgdir->bits[o]);
 
        if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
-               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+               dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                kfree(db->u.pgdir);
index 9c656fe4983dda7fbb10dd1b195bf12b2808c8e8..715de8affcc950e0ea18fd706bc8f04542d34a6f 100644 (file)
@@ -40,16 +40,177 @@ enum {
        MLX4_CATAS_POLL_INTERVAL        = 5 * HZ,
 };
 
-static DEFINE_SPINLOCK(catas_lock);
 
-static LIST_HEAD(catas_list);
-static struct work_struct catas_work;
 
-static int internal_err_reset = 1;
-module_param(internal_err_reset, int, 0644);
+int mlx4_internal_err_reset = 1;
+module_param_named(internal_err_reset, mlx4_internal_err_reset,  int, 0644);
 MODULE_PARM_DESC(internal_err_reset,
-                "Reset device on internal errors if non-zero"
-                " (default 1, in SRIOV mode default is 0)");
+                "Reset device on internal errors if non-zero (default 1)");
+
+static int read_vendor_id(struct mlx4_dev *dev)
+{
+       u16 vendor_id = 0;
+       int ret;
+
+       ret = pci_read_config_word(dev->persist->pdev, 0, &vendor_id);
+       if (ret) {
+               mlx4_err(dev, "Failed to read vendor ID, ret=%d\n", ret);
+               return ret;
+       }
+
+       if (vendor_id == 0xffff) {
+               mlx4_err(dev, "PCI can't be accessed to read vendor id\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int mlx4_reset_master(struct mlx4_dev *dev)
+{
+       int err = 0;
+
+       if (mlx4_is_master(dev))
+               mlx4_report_internal_err_comm_event(dev);
+
+       if (!pci_channel_offline(dev->persist->pdev)) {
+               err = read_vendor_id(dev);
+               /* If PCI can't be accessed to read vendor ID we assume that its
+                * link was disabled and chip was already reset.
+                */
+               if (err)
+                       return 0;
+
+               err = mlx4_reset(dev);
+               if (err)
+                       mlx4_err(dev, "Fail to reset HCA\n");
+       }
+
+       return err;
+}
+
+static int mlx4_reset_slave(struct mlx4_dev *dev)
+{
+#define COM_CHAN_RST_REQ_OFFSET 0x10
+#define COM_CHAN_RST_ACK_OFFSET 0x08
+
+       u32 comm_flags;
+       u32 rst_req;
+       u32 rst_ack;
+       unsigned long end;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       if (pci_channel_offline(dev->persist->pdev))
+               return 0;
+
+       comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
+                                 MLX4_COMM_CHAN_FLAGS));
+       if (comm_flags == 0xffffffff) {
+               mlx4_err(dev, "VF reset is not needed\n");
+               return 0;
+       }
+
+       if (!(dev->caps.vf_caps & MLX4_VF_CAP_FLAG_RESET)) {
+               mlx4_err(dev, "VF reset is not supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
+               COM_CHAN_RST_REQ_OFFSET;
+       rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
+               COM_CHAN_RST_ACK_OFFSET;
+       if (rst_req != rst_ack) {
+               mlx4_err(dev, "Communication channel isn't sync, fail to send reset\n");
+               return -EIO;
+       }
+
+       rst_req ^= 1;
+       mlx4_warn(dev, "VF is sending reset request to Firmware\n");
+       comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
+       __raw_writel((__force u32)cpu_to_be32(comm_flags),
+                    (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
+       /* Make sure that our comm channel write doesn't
+        * get mixed in with writes from another CPU.
+        */
+       mmiowb();
+
+       end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
+       while (time_before(jiffies, end)) {
+               comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
+                                         MLX4_COMM_CHAN_FLAGS));
+               rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
+                       COM_CHAN_RST_ACK_OFFSET;
+
+               /* Reading rst_req again since the communication channel can
+                * be reset at any time by the PF and all its bits will be
+                * set to zero.
+                */
+               rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
+                       COM_CHAN_RST_REQ_OFFSET;
+
+               if (rst_ack == rst_req) {
+                       mlx4_warn(dev, "VF Reset succeed\n");
+                       return 0;
+               }
+               cond_resched();
+       }
+       mlx4_err(dev, "Fail to send reset over the communication channel\n");
+       return -ETIMEDOUT;
+}
+
+static int mlx4_comm_internal_err(u32 slave_read)
+{
+       return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
+               (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
+}
+
+void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
+{
+       int err;
+       struct mlx4_dev *dev;
+
+       if (!mlx4_internal_err_reset)
+               return;
+
+       mutex_lock(&persist->device_state_mutex);
+       if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+               goto out;
+
+       dev = persist->dev;
+       mlx4_err(dev, "device is going to be reset\n");
+       if (mlx4_is_slave(dev))
+               err = mlx4_reset_slave(dev);
+       else
+               err = mlx4_reset_master(dev);
+       BUG_ON(err != 0);
+
+       dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
+       mlx4_err(dev, "device was reset successfully\n");
+       mutex_unlock(&persist->device_state_mutex);
+
+       /* At that step HW was already reset, now notify clients */
+       mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
+       mlx4_cmd_wake_completions(dev);
+       return;
+
+out:
+       mutex_unlock(&persist->device_state_mutex);
+}
+
+static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
+{
+       int err = 0;
+
+       mlx4_enter_error_state(persist);
+       mutex_lock(&persist->interface_state_mutex);
+       if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
+           !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
+               err = mlx4_restart_one(persist->pdev);
+               mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
+                         err);
+       }
+       mutex_unlock(&persist->interface_state_mutex);
+}
 
 static void dump_err_buf(struct mlx4_dev *dev)
 {
@@ -67,58 +228,40 @@ static void poll_catas(unsigned long dev_ptr)
 {
        struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
        struct mlx4_priv *priv = mlx4_priv(dev);
+       u32 slave_read;
 
-       if (readl(priv->catas_err.map)) {
-               /* If the device is off-line, we cannot try to recover it */
-               if (pci_channel_offline(dev->pdev))
-                       mod_timer(&priv->catas_err.timer,
-                                 round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
-               else {
-                       dump_err_buf(dev);
-                       mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
-
-                       if (internal_err_reset) {
-                               spin_lock(&catas_lock);
-                               list_add(&priv->catas_err.list, &catas_list);
-                               spin_unlock(&catas_lock);
-
-                               queue_work(mlx4_wq, &catas_work);
-                       }
+       if (mlx4_is_slave(dev)) {
+               slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+               if (mlx4_comm_internal_err(slave_read)) {
+                       mlx4_warn(dev, "Internal error detected on the communication channel\n");
+                       goto internal_err;
                }
-       } else
-               mod_timer(&priv->catas_err.timer,
-                         round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
+       } else if (readl(priv->catas_err.map)) {
+               dump_err_buf(dev);
+               goto internal_err;
+       }
+
+       if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+               mlx4_warn(dev, "Internal error mark was detected on device\n");
+               goto internal_err;
+       }
+
+       mod_timer(&priv->catas_err.timer,
+                 round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
+       return;
+
+internal_err:
+       if (mlx4_internal_err_reset)
+               queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
 }
 
 static void catas_reset(struct work_struct *work)
 {
-       struct mlx4_priv *priv, *tmppriv;
-       struct mlx4_dev *dev;
+       struct mlx4_dev_persistent *persist =
+               container_of(work, struct mlx4_dev_persistent,
+                            catas_work);
 
-       LIST_HEAD(tlist);
-       int ret;
-
-       spin_lock_irq(&catas_lock);
-       list_splice_init(&catas_list, &tlist);
-       spin_unlock_irq(&catas_lock);
-
-       list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
-               struct pci_dev *pdev = priv->dev.pdev;
-
-               /* If the device is off-line, we cannot reset it */
-               if (pci_channel_offline(pdev))
-                       continue;
-
-               ret = mlx4_restart_one(priv->dev.pdev);
-               /* 'priv' now is not valid */
-               if (ret)
-                       pr_err("mlx4 %s: Reset failed (%d)\n",
-                              pci_name(pdev), ret);
-               else {
-                       dev  = pci_get_drvdata(pdev);
-                       mlx4_dbg(dev, "Reset succeeded\n");
-               }
-       }
+       mlx4_handle_error_state(persist);
 }
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev)
@@ -126,22 +269,21 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        phys_addr_t addr;
 
-       /*If we are in SRIOV the default of the module param must be 0*/
-       if (mlx4_is_mfunc(dev))
-               internal_err_reset = 0;
-
        INIT_LIST_HEAD(&priv->catas_err.list);
        init_timer(&priv->catas_err.timer);
        priv->catas_err.map = NULL;
 
-       addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
-               priv->fw.catas_offset;
+       if (!mlx4_is_slave(dev)) {
+               addr = pci_resource_start(dev->persist->pdev,
+                                         priv->fw.catas_bar) +
+                                         priv->fw.catas_offset;
 
-       priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
-       if (!priv->catas_err.map) {
-               mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
-                         (unsigned long long) addr);
-               return;
+               priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
+               if (!priv->catas_err.map) {
+                       mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+                                 (unsigned long long)addr);
+                       return;
+               }
        }
 
        priv->catas_err.timer.data     = (unsigned long) dev;
@@ -157,15 +299,29 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
 
        del_timer_sync(&priv->catas_err.timer);
 
-       if (priv->catas_err.map)
+       if (priv->catas_err.map) {
                iounmap(priv->catas_err.map);
+               priv->catas_err.map = NULL;
+       }
 
-       spin_lock_irq(&catas_lock);
-       list_del(&priv->catas_err.list);
-       spin_unlock_irq(&catas_lock);
+       if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION)
+               flush_workqueue(dev->persist->catas_wq);
 }
 
-void  __init mlx4_catas_init(void)
+int  mlx4_catas_init(struct mlx4_dev *dev)
 {
-       INIT_WORK(&catas_work, catas_reset);
+       INIT_WORK(&dev->persist->catas_work, catas_reset);
+       dev->persist->catas_wq = create_singlethread_workqueue("mlx4_health");
+       if (!dev->persist->catas_wq)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void mlx4_catas_end(struct mlx4_dev *dev)
+{
+       if (dev->persist->catas_wq) {
+               destroy_workqueue(dev->persist->catas_wq);
+               dev->persist->catas_wq = NULL;
+       }
 }
index 5c93d1451c449e328fe05505bc848a146af3df9e..a681d7c0bb9f066f8d48ed068f68f0001dad8d0f 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/mlx4/device.h>
 #include <linux/semaphore.h>
 #include <rdma/ib_smi.h>
+#include <linux/delay.h>
 
 #include <asm/io.h>
 
@@ -182,6 +183,72 @@ static u8 mlx4_errno_to_status(int errno)
        }
 }
 
+static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
+                                      u8 op_modifier)
+{
+       switch (op) {
+       case MLX4_CMD_UNMAP_ICM:
+       case MLX4_CMD_UNMAP_ICM_AUX:
+       case MLX4_CMD_UNMAP_FA:
+       case MLX4_CMD_2RST_QP:
+       case MLX4_CMD_HW2SW_EQ:
+       case MLX4_CMD_HW2SW_CQ:
+       case MLX4_CMD_HW2SW_SRQ:
+       case MLX4_CMD_HW2SW_MPT:
+       case MLX4_CMD_CLOSE_HCA:
+       case MLX4_QP_FLOW_STEERING_DETACH:
+       case MLX4_CMD_FREE_RES:
+       case MLX4_CMD_CLOSE_PORT:
+               return CMD_STAT_OK;
+
+       case MLX4_CMD_QP_ATTACH:
+               /* On Detach case return success */
+               if (op_modifier == 0)
+                       return CMD_STAT_OK;
+               return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+
+       default:
+               return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+       }
+}
+
+static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
+{
+       /* Any error during the closing commands below is considered fatal */
+       if (op == MLX4_CMD_CLOSE_HCA ||
+           op == MLX4_CMD_HW2SW_EQ ||
+           op == MLX4_CMD_HW2SW_CQ ||
+           op == MLX4_CMD_2RST_QP ||
+           op == MLX4_CMD_HW2SW_SRQ ||
+           op == MLX4_CMD_SYNC_TPT ||
+           op == MLX4_CMD_UNMAP_ICM ||
+           op == MLX4_CMD_UNMAP_ICM_AUX ||
+           op == MLX4_CMD_UNMAP_FA)
+               return 1;
+       /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
+         * CMD_STAT_REG_BOUND.
+         * This status indicates that memory region has memory windows bound to it
+         * which may result from invalid user space usage and is not fatal.
+         */
+       if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
+               return 1;
+       return 0;
+}
+
+static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
+                              int err)
+{
+       /* Only if reset flow is really active return code is based on
+         * command, otherwise current error code is returned.
+         */
+       if (mlx4_internal_err_reset) {
+               mlx4_enter_error_state(dev->persist);
+               err = mlx4_internal_err_ret_value(dev, op, op_modifier);
+       }
+
+       return err;
+}
+
 static int comm_pending(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -190,16 +257,30 @@ static int comm_pending(struct mlx4_dev *dev)
        return (swab32(status) >> 31) != priv->cmd.comm_toggle;
 }
 
-static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        u32 val;
 
+       /* To avoid writing to unknown addresses after the device state was
+        * changed to internal error and the function was rest,
+        * check the INTERNAL_ERROR flag which is updated under
+        * device_state_mutex lock.
+        */
+       mutex_lock(&dev->persist->device_state_mutex);
+
+       if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+               mutex_unlock(&dev->persist->device_state_mutex);
+               return -EIO;
+       }
+
        priv->cmd.comm_toggle ^= 1;
        val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
        __raw_writel((__force u32) cpu_to_be32(val),
                     &priv->mfunc.comm->slave_write);
        mmiowb();
+       mutex_unlock(&dev->persist->device_state_mutex);
+       return 0;
 }
 
 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
@@ -219,7 +300,13 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
 
        /* Write command */
        down(&priv->cmd.poll_sem);
-       mlx4_comm_cmd_post(dev, cmd, param);
+       if (mlx4_comm_cmd_post(dev, cmd, param)) {
+               /* Only in case the device state is INTERNAL_ERROR,
+                * mlx4_comm_cmd_post returns with an error
+                */
+               err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+               goto out;
+       }
 
        end = msecs_to_jiffies(timeout) + jiffies;
        while (comm_pending(dev) && time_before(jiffies, end))
@@ -231,18 +318,23 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
                 * is MLX4_DELAY_RESET_SLAVE*/
                if ((MLX4_COMM_CMD_RESET == cmd)) {
                        err = MLX4_DELAY_RESET_SLAVE;
+                       goto out;
                } else {
-                       mlx4_warn(dev, "Communication channel timed out\n");
-                       err = -ETIMEDOUT;
+                       mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
+                                 cmd);
+                       err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
                }
        }
 
+       if (err)
+               mlx4_enter_error_state(dev->persist);
+out:
        up(&priv->cmd.poll_sem);
        return err;
 }
 
-static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
-                             u16 param, unsigned long timeout)
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
+                             u16 param, u16 op, unsigned long timeout)
 {
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_context *context;
@@ -258,34 +350,49 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);
 
-       init_completion(&context->done);
+       reinit_completion(&context->done);
 
-       mlx4_comm_cmd_post(dev, op, param);
+       if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
+               /* Only in case the device state is INTERNAL_ERROR,
+                * mlx4_comm_cmd_post returns with an error
+                */
+               err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+               goto out;
+       }
 
        if (!wait_for_completion_timeout(&context->done,
                                         msecs_to_jiffies(timeout))) {
-               mlx4_warn(dev, "communication channel command 0x%x timed out\n",
-                         op);
-               err = -EBUSY;
-               goto out;
+               mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
+                         vhcr_cmd, op);
+               goto out_reset;
        }
 
        err = context->result;
        if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
-                        op, context->fw_status);
-               goto out;
+                        vhcr_cmd, context->fw_status);
+               if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
+                       goto out_reset;
        }
 
-out:
        /* wait for comm channel ready
         * this is necessary for prevention the race
         * when switching between event to polling mode
+        * Skipping this section in case the device is in FATAL_ERROR state,
+        * In this state, no commands are sent via the comm channel until
+        * the device has returned from reset.
         */
-       end = msecs_to_jiffies(timeout) + jiffies;
-       while (comm_pending(dev) && time_before(jiffies, end))
-               cond_resched();
+       if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
+               end = msecs_to_jiffies(timeout) + jiffies;
+               while (comm_pending(dev) && time_before(jiffies, end))
+                       cond_resched();
+       }
+       goto out;
 
+out_reset:
+       err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+       mlx4_enter_error_state(dev->persist);
+out:
        spin_lock(&cmd->context_lock);
        context->next = cmd->free_head;
        cmd->free_head = context - cmd->context;
@@ -296,10 +403,13 @@ out:
 }
 
 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
-                 unsigned long timeout)
+                 u16 op, unsigned long timeout)
 {
+       if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+               return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+
        if (mlx4_priv(dev)->cmd.use_events)
-               return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+               return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
        return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
 }
 
@@ -307,7 +417,7 @@ static int cmd_pending(struct mlx4_dev *dev)
 {
        u32 status;
 
-       if (pci_channel_offline(dev->pdev))
+       if (pci_channel_offline(dev->persist->pdev))
                return -EIO;
 
        status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -323,17 +433,21 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
 {
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        u32 __iomem *hcr = cmd->hcr;
-       int ret = -EAGAIN;
+       int ret = -EIO;
        unsigned long end;
 
-       mutex_lock(&cmd->hcr_mutex);
-
-       if (pci_channel_offline(dev->pdev)) {
+       mutex_lock(&dev->persist->device_state_mutex);
+       /* To avoid writing to unknown addresses after the device state was
+         * changed to internal error and the chip was reset,
+         * check the INTERNAL_ERROR flag which is updated under
+         * device_state_mutex lock.
+         */
+       if (pci_channel_offline(dev->persist->pdev) ||
+           (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
                /*
                 * Device is going through error recovery
                 * and cannot accept commands.
                 */
-               ret = -EIO;
                goto out;
        }
 
@@ -342,12 +456,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
 
        while (cmd_pending(dev)) {
-               if (pci_channel_offline(dev->pdev)) {
+               if (pci_channel_offline(dev->persist->pdev)) {
                        /*
                         * Device is going through error recovery
                         * and cannot accept commands.
                         */
-                       ret = -EIO;
                        goto out;
                }
 
@@ -391,7 +504,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
        ret = 0;
 
 out:
-       mutex_unlock(&cmd->hcr_mutex);
+       if (ret)
+               mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
+                         op, ret, in_param, in_modifier, op_modifier);
+       mutex_unlock(&dev->persist->device_state_mutex);
+
        return ret;
 }
 
@@ -428,8 +545,11 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                        }
                        ret = mlx4_status_to_errno(vhcr->status);
                }
+               if (ret &&
+                   dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+                       ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
        } else {
-               ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+               ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
                                    MLX4_COMM_TIME + timeout);
                if (!ret) {
                        if (out_is_imm) {
@@ -443,9 +563,14 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                }
                        }
                        ret = mlx4_status_to_errno(vhcr->status);
-               } else
-                       mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
-                                op);
+               } else {
+                       if (dev->persist->state &
+                           MLX4_DEVICE_STATE_INTERNAL_ERROR)
+                               ret = mlx4_internal_err_ret_value(dev, op,
+                                                                 op_modifier);
+                       else
+                               mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
+               }
        }
 
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -464,12 +589,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 
        down(&priv->cmd.poll_sem);
 
-       if (pci_channel_offline(dev->pdev)) {
+       if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                /*
                 * Device is going through error recovery
                 * and cannot accept commands.
                 */
-               err = -EIO;
+               err = mlx4_internal_err_ret_value(dev, op, op_modifier);
                goto out;
        }
 
@@ -483,16 +608,21 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
        if (err)
-               goto out;
+               goto out_reset;
 
        end = msecs_to_jiffies(timeout) + jiffies;
        while (cmd_pending(dev) && time_before(jiffies, end)) {
-               if (pci_channel_offline(dev->pdev)) {
+               if (pci_channel_offline(dev->persist->pdev)) {
                        /*
                         * Device is going through error recovery
                         * and cannot accept commands.
                         */
                        err = -EIO;
+                       goto out_reset;
+               }
+
+               if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+                       err = mlx4_internal_err_ret_value(dev, op, op_modifier);
                        goto out;
                }
 
@@ -502,8 +632,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        if (cmd_pending(dev)) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
                          op);
-               err = -ETIMEDOUT;
-               goto out;
+               err = -EIO;
+               goto out_reset;
        }
 
        if (out_is_imm)
@@ -515,10 +645,17 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        stat = be32_to_cpu((__force __be32)
                           __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
        err = mlx4_status_to_errno(stat);
-       if (err)
+       if (err) {
                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
                         op, stat);
+               if (mlx4_closing_cmd_fatal_error(op, stat))
+                       goto out_reset;
+               goto out;
+       }
 
+out_reset:
+       if (err)
+               err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 out:
        up(&priv->cmd.poll_sem);
        return err;
@@ -565,17 +702,19 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                goto out;
        }
 
-       init_completion(&context->done);
+       reinit_completion(&context->done);
 
-       mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
-                     in_modifier, op_modifier, op, context->token, 1);
+       err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
+                           in_modifier, op_modifier, op, context->token, 1);
+       if (err)
+               goto out_reset;
 
        if (!wait_for_completion_timeout(&context->done,
                                         msecs_to_jiffies(timeout))) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
                          op);
-               err = -EBUSY;
-               goto out;
+               err = -EIO;
+               goto out_reset;
        }
 
        err = context->result;
@@ -592,12 +731,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                else
                        mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
                                 op, context->fw_status);
+               if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+                       err = mlx4_internal_err_ret_value(dev, op, op_modifier);
+               else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
+                       goto out_reset;
+
                goto out;
        }
 
        if (out_is_imm)
                *out_param = context->out_param;
 
+out_reset:
+       if (err)
+               err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 out:
        spin_lock(&cmd->context_lock);
        context->next = cmd->free_head;
@@ -612,10 +759,13 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
               u16 op, unsigned long timeout, int native)
 {
-       if (pci_channel_offline(dev->pdev))
-               return -EIO;
+       if (pci_channel_offline(dev->persist->pdev))
+               return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
 
        if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+               if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+                       return mlx4_internal_err_ret_value(dev, op,
+                                                         op_modifier);
                if (mlx4_priv(dev)->cmd.use_events)
                        return mlx4_cmd_wait(dev, in_param, out_param,
                                             out_is_imm, in_modifier,
@@ -631,7 +781,7 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 EXPORT_SYMBOL_GPL(__mlx4_cmd);
 
 
-static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
 {
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
@@ -751,7 +901,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
                                index = be32_to_cpu(smp->attr_mod);
                                if (port < 1 || port > dev->caps.num_ports)
                                        return -EINVAL;
-                               table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
+                               table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
+                                               sizeof(*table) * 32, GFP_KERNEL);
+
                                if (!table)
                                        return -ENOMEM;
                                /* need to get the full pkey table because the paravirtualized
@@ -1071,7 +1223,7 @@ static struct mlx4_cmd_info cmd_info[] = {
        {
                .opcode = MLX4_CMD_HW2SW_EQ,
                .has_inbox = false,
-               .has_outbox = true,
+               .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = true,
                .verify = NULL,
@@ -1431,6 +1583,15 @@ static struct mlx4_cmd_info cmd_info[] = {
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper
        },
+       {
+               .opcode = MLX4_CMD_VIRT_PORT_MAP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CMD_EPERM_wrapper
+       },
 };
 
 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
@@ -1460,8 +1621,10 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
                                      ALIGN(sizeof(struct mlx4_vhcr_cmd),
                                            MLX4_ACCESS_MEM_ALIGN), 1);
                if (ret) {
-                       mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
-                                __func__, ret);
+                       if (!(dev->persist->state &
+                           MLX4_DEVICE_STATE_INTERNAL_ERROR))
+                               mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+                                        __func__, ret);
                        kfree(vhcr);
                        return ret;
                }
@@ -1500,11 +1663,14 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
                        goto out_status;
                }
 
-               if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
-                                   vhcr->in_param,
-                                   MLX4_MAILBOX_SIZE, 1)) {
-                       mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
-                                __func__, cmd->opcode);
+               ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+                                     vhcr->in_param,
+                                     MLX4_MAILBOX_SIZE, 1);
+               if (ret) {
+                       if (!(dev->persist->state &
+                           MLX4_DEVICE_STATE_INTERNAL_ERROR))
+                               mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+                                        __func__, cmd->opcode);
                        vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
                        goto out_status;
                }
@@ -1552,8 +1718,9 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
        }
 
        if (err) {
-               mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
-                         vhcr->op, slave, vhcr->errno, err);
+               if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
+                       mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
+                                 vhcr->op, slave, vhcr->errno, err);
                vhcr_cmd->status = mlx4_errno_to_status(err);
                goto out_status;
        }
@@ -1568,7 +1735,9 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
                        /* If we failed to write back the outbox after the
                         *command was successfully executed, we must fail this
                         * slave, as it is now in undefined state */
-                       mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+                       if (!(dev->persist->state &
+                           MLX4_DEVICE_STATE_INTERNAL_ERROR))
+                               mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
                        goto out;
                }
        }
@@ -1847,8 +2016,11 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                break;
        case MLX4_COMM_CMD_VHCR_POST:
                if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
-                   (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+                   (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
+                       mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
+                                 slave, cmd, slave_state[slave].last_cmd);
                        goto reset_slave;
+               }
 
                mutex_lock(&priv->cmd.slave_cmd_mutex);
                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
@@ -1882,7 +2054,18 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 
 reset_slave:
        /* cleanup any slave resources */
-       mlx4_delete_all_resources_for_slave(dev, slave);
+       if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
+               mlx4_delete_all_resources_for_slave(dev, slave);
+
+       if (cmd != MLX4_COMM_CMD_RESET) {
+               mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
+                         slave, cmd);
+               /* Turn on internal error letting slave reset itself immeditaly,
+                * otherwise it might take till timeout on command is passed
+                */
+               reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
+       }
+
        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
        if (!slave_state[slave].is_slave_going_down)
                slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
@@ -1958,17 +2141,28 @@ void mlx4_master_comm_channel(struct work_struct *work)
 static int sync_toggles(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       int wr_toggle;
-       int rd_toggle;
+       u32 wr_toggle;
+       u32 rd_toggle;
        unsigned long end;
 
-       wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
-       end = jiffies + msecs_to_jiffies(5000);
+       wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
+       if (wr_toggle == 0xffffffff)
+               end = jiffies + msecs_to_jiffies(30000);
+       else
+               end = jiffies + msecs_to_jiffies(5000);
 
        while (time_before(jiffies, end)) {
-               rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
-               if (rd_toggle == wr_toggle) {
-                       priv->cmd.comm_toggle = rd_toggle;
+               rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
+               if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
+                       /* PCI might be offline */
+                       msleep(100);
+                       wr_toggle = swab32(readl(&priv->mfunc.comm->
+                                          slave_write));
+                       continue;
+               }
+
+               if (rd_toggle >> 31 == wr_toggle >> 31) {
+                       priv->cmd.comm_toggle = rd_toggle >> 31;
                        return 0;
                }
 
@@ -1997,11 +2191,12 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 
        if (mlx4_is_master(dev))
                priv->mfunc.comm =
-               ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+               ioremap(pci_resource_start(dev->persist->pdev,
+                                          priv->fw.comm_bar) +
                        priv->fw.comm_base, MLX4_COMM_PAGESIZE);
        else
                priv->mfunc.comm =
-               ioremap(pci_resource_start(dev->pdev, 2) +
+               ioremap(pci_resource_start(dev->persist->pdev, 2) +
                        MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
        if (!priv->mfunc.comm) {
                mlx4_err(dev, "Couldn't map communication vector\n");
@@ -2073,13 +2268,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                if (mlx4_init_resource_tracker(dev))
                        goto err_thread;
 
-               err = mlx4_ARM_COMM_CHANNEL(dev);
-               if (err) {
-                       mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
-                                err);
-                       goto err_resource;
-               }
-
        } else {
                err = sync_toggles(dev);
                if (err) {
@@ -2089,8 +2277,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
        }
        return 0;
 
-err_resource:
-       mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
 err_thread:
        flush_workqueue(priv->mfunc.master.comm_wq);
        destroy_workqueue(priv->mfunc.master.comm_wq);
@@ -2107,9 +2293,9 @@ err_comm_admin:
 err_comm:
        iounmap(priv->mfunc.comm);
 err_vhcr:
-       dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
-                                            priv->mfunc.vhcr,
-                                            priv->mfunc.vhcr_dma);
+       dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
+                         priv->mfunc.vhcr,
+                         priv->mfunc.vhcr_dma);
        priv->mfunc.vhcr = NULL;
        return -ENOMEM;
 }
@@ -2120,7 +2306,6 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
        int flags = 0;
 
        if (!priv->cmd.initialized) {
-               mutex_init(&priv->cmd.hcr_mutex);
                mutex_init(&priv->cmd.slave_cmd_mutex);
                sema_init(&priv->cmd.poll_sem, 1);
                priv->cmd.use_events = 0;
@@ -2130,8 +2315,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
        }
 
        if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
-               priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
-                                       MLX4_HCR_BASE, MLX4_HCR_SIZE);
+               priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
+                                       0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
                if (!priv->cmd.hcr) {
                        mlx4_err(dev, "Couldn't map command register\n");
                        goto err;
@@ -2140,7 +2325,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
        }
 
        if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
-               priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+               priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
+                                                     PAGE_SIZE,
                                                      &priv->mfunc.vhcr_dma,
                                                      GFP_KERNEL);
                if (!priv->mfunc.vhcr)
@@ -2150,7 +2336,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
        }
 
        if (!priv->cmd.pool) {
-               priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
+               priv->cmd.pool = pci_pool_create("mlx4_cmd",
+                                                dev->persist->pdev,
                                                 MLX4_MAILBOX_SIZE,
                                                 MLX4_MAILBOX_SIZE, 0);
                if (!priv->cmd.pool)
@@ -2166,6 +2353,27 @@ err:
        return -ENOMEM;
 }
 
+void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int slave;
+       u32 slave_read;
+
+       /* Report an internal error event to all
+        * communication channels.
+        */
+       for (slave = 0; slave < dev->num_slaves; slave++) {
+               slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
+               slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
+               __raw_writel((__force u32)cpu_to_be32(slave_read),
+                            &priv->mfunc.comm[slave].slave_read);
+               /* Make sure that our comm channel write doesn't
+                * get mixed in with writes from another CPU.
+                */
+               mmiowb();
+       }
+}
+
 void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2181,6 +2389,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
                kfree(priv->mfunc.master.slave_state);
                kfree(priv->mfunc.master.vf_admin);
                kfree(priv->mfunc.master.vf_oper);
+               dev->num_slaves = 0;
        }
 
        iounmap(priv->mfunc.comm);
@@ -2202,7 +2411,7 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
        }
        if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
            (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
-               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+               dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
                priv->mfunc.vhcr = NULL;
        }
@@ -2229,6 +2438,11 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
        for (i = 0; i < priv->cmd.max_cmds; ++i) {
                priv->cmd.context[i].token = i;
                priv->cmd.context[i].next  = i + 1;
+               /* To support fatal error flow, initialize all
+                * cmd contexts to allow simulating completions
+                * with complete() at any time.
+                */
+               init_completion(&priv->cmd.context[i].done);
        }
 
        priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
@@ -2306,8 +2520,9 @@ u32 mlx4_comm_get_version(void)
 
 static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
 {
-       if ((vf < 0) || (vf >= dev->num_vfs)) {
-               mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
+       if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
+               mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
+                        vf, dev->persist->num_vfs);
                return -EINVAL;
        }
 
@@ -2316,7 +2531,7 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
 
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
 {
-       if (slave < 1 || slave > dev->num_vfs) {
+       if (slave < 1 || slave > dev->persist->num_vfs) {
                mlx4_err(dev,
                         "Bad slave number:%d (number of activated slaves: %lu)\n",
                         slave, dev->num_slaves);
@@ -2325,6 +2540,25 @@ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
        return slave - 1;
 }
 
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cmd_context *context;
+       int i;
+
+       spin_lock(&priv->cmd.context_lock);
+       if (priv->cmd.context) {
+               for (i = 0; i < priv->cmd.max_cmds; ++i) {
+                       context = &priv->cmd.context[i];
+                       context->fw_status = CMD_STAT_INTERNAL_ERR;
+                       context->result    =
+                               mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
+                       complete(&context->done);
+               }
+       }
+       spin_unlock(&priv->cmd.context_lock);
+}
+
 struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
 {
        struct mlx4_active_ports actv_ports;
@@ -2388,7 +2622,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
        if (port <= 0 || port > dev->caps.num_ports)
                return slaves_pport;
 
-       for (i = 0; i < dev->num_vfs + 1; i++) {
+       for (i = 0; i < dev->persist->num_vfs + 1; i++) {
                struct mlx4_active_ports actv_ports =
                        mlx4_get_active_ports(dev, i);
                if (test_bit(port - 1, actv_ports.ports))
@@ -2408,7 +2642,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
 
        bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
 
-       for (i = 0; i < dev->num_vfs + 1; i++) {
+       for (i = 0; i < dev->persist->num_vfs + 1; i++) {
                struct mlx4_active_ports actv_ports =
                        mlx4_get_active_ports(dev, i);
                if (bitmap_equal(crit_ports->ports, actv_ports.ports,
index 82322b1c8411b80ff9d15f8c8022a3095f69c4d0..22da4d0d0f05511dfc89a360e6df6871e96b6e7a 100644 (file)
@@ -70,10 +70,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
        /* Allocate HW buffers on provided NUMA node.
         * dev->numa_node is used in mtt range allocation flow.
         */
-       set_dev_node(&mdev->dev->pdev->dev, node);
+       set_dev_node(&mdev->dev->persist->pdev->dev, node);
        err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
                                cq->buf_size, 2 * PAGE_SIZE);
-       set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
+       set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err)
                goto err_cq;
 
index 90e0f045a6bc2ae29071e286dcb6360254997ceb..a7b58ba8492b5c2540111ab2622cb8819f1f21cb 100644 (file)
@@ -92,7 +92,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
                (u16) (mdev->dev->caps.fw_ver >> 32),
                (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
                (u16) (mdev->dev->caps.fw_ver & 0xffff));
-       strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+       strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
                sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
        drvinfo->regdump_len = 0;
@@ -770,22 +770,20 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                return 0;
        }
 
-       proto_admin = cpu_to_be32(ptys_adv);
-       if (speed >= 0 && speed != priv->port_state.link_speed)
-               /* If speed was set then speed decides :-) */
-               proto_admin = speed_set_ptys_admin(priv, speed,
-                                                  ptys_reg.eth_proto_cap);
+       proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
+               cpu_to_be32(ptys_adv) :
+               speed_set_ptys_admin(priv, speed,
+                                    ptys_reg.eth_proto_cap);
 
        proto_admin &= ptys_reg.eth_proto_cap;
-
-       if (proto_admin == ptys_reg.eth_proto_admin)
-               return 0; /* Nothing to change */
-
        if (!proto_admin) {
                en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
                return -EINVAL; /* nothing to change due to bad input */
        }
 
+       if (proto_admin == ptys_reg.eth_proto_admin)
+               return 0; /* Nothing to change */
+
        en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
               be32_to_cpu(proto_admin));
 
@@ -798,9 +796,9 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                return ret;
        }
 
-       en_warn(priv, "Port link mode changed, restarting port...\n");
        mutex_lock(&priv->mdev->state_lock);
        if (priv->port_up) {
+               en_warn(priv, "Port link mode changed, restarting port...\n");
                mlx4_en_stop_port(dev, 1);
                if (mlx4_en_start_port(dev))
                        en_err(priv, "Failed restarting port %d\n", priv->port);
index 9f16f754137bf2a10c9324632c3ef76b40d5eda0..58d5a07d0ff4da6397118fb4b3b051ac936ed95b 100644 (file)
@@ -214,6 +214,8 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
        iounmap(mdev->uar_map);
        mlx4_uar_free(dev, &mdev->priv_uar);
        mlx4_pd_free(dev, mdev->priv_pdn);
+       if (mdev->nb.notifier_call)
+               unregister_netdevice_notifier(&mdev->nb);
        kfree(mdev);
 }
 
@@ -241,8 +243,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
        spin_lock_init(&mdev->uar_lock);
 
        mdev->dev = dev;
-       mdev->dma_device = &(dev->pdev->dev);
-       mdev->pdev = dev->pdev;
+       mdev->dma_device = &dev->persist->pdev->dev;
+       mdev->pdev = dev->persist->pdev;
        mdev->device_up = false;
 
        mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
@@ -298,6 +300,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
                if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
                        mdev->pndev[i] = NULL;
        }
+       /* register notifier */
+       mdev->nb.notifier_call = mlx4_en_netdev_event;
+       if (register_netdevice_notifier(&mdev->nb)) {
+               mdev->nb.notifier_call = NULL;
+               mlx4_err(mdev, "Failed to create notifier\n");
+       }
 
        return mdev;
 
index d0d6dc1b8e46e8173cbd61f5c2e84d4292301bc4..2a210c4efb895728ec6ad12eaef9ec8f9ff7fd08 100644 (file)
@@ -475,7 +475,8 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
 {
        int err;
 
-       if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+       if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+           priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
                return 0; /* do nothing */
 
        err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
@@ -2061,6 +2062,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        /* Detach the netdev so tasks would not attempt to access it */
        mutex_lock(&mdev->state_lock);
        mdev->pndev[priv->port] = NULL;
+       mdev->upper[priv->port] = NULL;
        mutex_unlock(&mdev->state_lock);
 
        mlx4_en_free_resources(priv);
@@ -2200,6 +2202,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
                        return ret;
        }
 
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
+               en_info(priv, "Turn %s TX vlan strip offload\n",
+                       (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
+
        if (features & NETIF_F_LOOPBACK)
                priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
        else
@@ -2440,6 +2446,180 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 #endif
 };
 
+struct mlx4_en_bond {
+       struct work_struct work;
+       struct mlx4_en_priv *priv;
+       int is_bonded;
+       struct mlx4_port_map port_map;
+};
+
+static void mlx4_en_bond_work(struct work_struct *work)
+{
+       struct mlx4_en_bond *bond = container_of(work,
+                                                    struct mlx4_en_bond,
+                                                    work);
+       int err = 0;
+       struct mlx4_dev *dev = bond->priv->mdev->dev;
+
+       if (bond->is_bonded) {
+               if (!mlx4_is_bonded(dev)) {
+                       err = mlx4_bond(dev);
+                       if (err)
+                               en_err(bond->priv, "Fail to bond device\n");
+               }
+               if (!err) {
+                       err = mlx4_port_map_set(dev, &bond->port_map);
+                       if (err)
+                               en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
+                                      bond->port_map.port1,
+                                      bond->port_map.port2,
+                                      err);
+               }
+       } else if (mlx4_is_bonded(dev)) {
+               err = mlx4_unbond(dev);
+               if (err)
+                       en_err(bond->priv, "Fail to unbond device\n");
+       }
+       dev_put(bond->priv->dev);
+       kfree(bond);
+}
+
+static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
+                                  u8 v2p_p1, u8 v2p_p2)
+{
+       struct mlx4_en_bond *bond = NULL;
+
+       bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
+       if (!bond)
+               return -ENOMEM;
+
+       INIT_WORK(&bond->work, mlx4_en_bond_work);
+       bond->priv = priv;
+       bond->is_bonded = is_bonded;
+       bond->port_map.port1 = v2p_p1;
+       bond->port_map.port2 = v2p_p2;
+       dev_hold(priv->dev);
+       queue_work(priv->mdev->workqueue, &bond->work);
+       return 0;
+}
+
+int mlx4_en_netdev_event(struct notifier_block *this,
+                        unsigned long event, void *ptr)
+{
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       u8 port = 0;
+       struct mlx4_en_dev *mdev;
+       struct mlx4_dev *dev;
+       int i, num_eth_ports = 0;
+       bool do_bond = true;
+       struct mlx4_en_priv *priv;
+       u8 v2p_port1 = 0;
+       u8 v2p_port2 = 0;
+
+       if (!net_eq(dev_net(ndev), &init_net))
+               return NOTIFY_DONE;
+
+       mdev = container_of(this, struct mlx4_en_dev, nb);
+       dev = mdev->dev;
+
+       /* Go into this mode only when two network devices set on two ports
+        * of the same mlx4 device are slaves of the same bonding master
+        */
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+               ++num_eth_ports;
+               if (!port && (mdev->pndev[i] == ndev))
+                       port = i;
+               mdev->upper[i] = mdev->pndev[i] ?
+                       netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
+               /* condition not met: network device is a slave */
+               if (!mdev->upper[i])
+                       do_bond = false;
+               if (num_eth_ports < 2)
+                       continue;
+               /* condition not met: same master */
+               if (mdev->upper[i] != mdev->upper[i-1])
+                       do_bond = false;
+       }
+       /* condition not met: 2 salves */
+       do_bond = (num_eth_ports ==  2) ? do_bond : false;
+
+       /* handle only events that come with enough info */
+       if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
+               return NOTIFY_DONE;
+
+       priv = netdev_priv(ndev);
+       if (do_bond) {
+               struct netdev_notifier_bonding_info *notifier_info = ptr;
+               struct netdev_bonding_info *bonding_info =
+                       &notifier_info->bonding_info;
+
+               /* required mode 1, 2 or 4 */
+               if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
+                   (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
+                   (bonding_info->master.bond_mode != BOND_MODE_8023AD))
+                       do_bond = false;
+
+               /* require exactly 2 slaves */
+               if (bonding_info->master.num_slaves != 2)
+                       do_bond = false;
+
+               /* calc v2p */
+               if (do_bond) {
+                       if (bonding_info->master.bond_mode ==
+                           BOND_MODE_ACTIVEBACKUP) {
+                               /* in active-backup mode virtual ports are
+                                * mapped to the physical port of the active
+                                * slave */
+                               if (bonding_info->slave.state ==
+                                   BOND_STATE_BACKUP) {
+                                       if (port == 1) {
+                                               v2p_port1 = 2;
+                                               v2p_port2 = 2;
+                                       } else {
+                                               v2p_port1 = 1;
+                                               v2p_port2 = 1;
+                                       }
+                               } else { /* BOND_STATE_ACTIVE */
+                                       if (port == 1) {
+                                               v2p_port1 = 1;
+                                               v2p_port2 = 1;
+                                       } else {
+                                               v2p_port1 = 2;
+                                               v2p_port2 = 2;
+                                       }
+                               }
+                       } else { /* Active-Active */
+                               /* in active-active mode a virtual port is
+                                * mapped to the native physical port if and only
+                                * if the physical port is up */
+                               __s8 link = bonding_info->slave.link;
+
+                               if (port == 1)
+                                       v2p_port2 = 2;
+                               else
+                                       v2p_port1 = 1;
+                               if ((link == BOND_LINK_UP) ||
+                                   (link == BOND_LINK_FAIL)) {
+                                       if (port == 1)
+                                               v2p_port1 = 1;
+                                       else
+                                               v2p_port2 = 2;
+                               } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
+                                       if (port == 1)
+                                               v2p_port1 = 2;
+                                       else
+                                               v2p_port2 = 1;
+                               }
+                       }
+               }
+       }
+
+       mlx4_en_queue_bond_work(priv, do_bond,
+                               v2p_port1, v2p_port2);
+
+       return NOTIFY_DONE;
+}
+
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof)
 {
@@ -2457,7 +2637,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
        netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
 
-       SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+       SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
        dev->dev_port = port - 1;
 
        /*
@@ -2622,6 +2802,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        }
 
        mdev->pndev[port] = dev;
+       mdev->upper[port] = NULL;
 
        netif_carrier_off(dev);
        mlx4_en_set_default_moderation(priv);
index f1a5500ff72de1ee07d7d8bfd2dc91e02e2d6388..34f2fdf4fe5d214154714d3e9a672e47e23afdae 100644 (file)
@@ -50,10 +50,14 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
        context->mtu_msgmax = 0xff;
        if (!is_tx && !rss)
                context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
-       if (is_tx)
+       if (is_tx) {
                context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
-       else
+               if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
+                       context->params2 |= MLX4_QP_BIT_FPP;
+
+       } else {
                context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
+       }
        context->usr_page = cpu_to_be32(mdev->priv_uar.index);
        context->local_qpn = cpu_to_be32(qpn);
        context->pri_path.ackto = 1 & 0x07;
index a0474eb94aa387de4c86826cb89fa994bc821436..698d60de1255269c11363c0196fd16800d5c4f13 100644 (file)
@@ -162,6 +162,10 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
                if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
                                     frag_info, GFP_KERNEL | __GFP_COLD))
                        goto out;
+
+               en_dbg(DRV, priv, "  frag %d allocator: - size:%d frags:%d\n",
+                      i, ring->page_alloc[i].page_size,
+                      atomic_read(&ring->page_alloc[i].page->_count));
        }
        return 0;
 
@@ -387,10 +391,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                 ring->rx_info, tmp);
 
        /* Allocate HW buffers on provided NUMA node */
-       set_dev_node(&mdev->dev->pdev->dev, node);
+       set_dev_node(&mdev->dev->persist->pdev->dev, node);
        err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
                                 ring->buf_size, 2 * PAGE_SIZE);
-       set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
+       set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err)
                goto err_info;
 
@@ -1059,8 +1063,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
                        (eff_mtu > buf_size + frag_sizes[i]) ?
                                frag_sizes[i] : eff_mtu - buf_size;
                priv->frag_info[i].frag_prefix_size = buf_size;
-               priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
-                                                      SMP_CACHE_BYTES);
+               priv->frag_info[i].frag_stride =
+                               ALIGN(priv->frag_info[i].frag_size,
+                                     SMP_CACHE_BYTES);
                buf_size += priv->frag_info[i].frag_size;
                i++;
        }
index e3357bf523df866222bdabdd0ec4c31cb24bb2a7..55f9f5c5344e19a3d8d76b083e0fd73d44b9870b 100644 (file)
@@ -91,10 +91,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
        ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
 
        /* Allocate HW buffers on provided NUMA node */
-       set_dev_node(&mdev->dev->pdev->dev, node);
+       set_dev_node(&mdev->dev->persist->pdev->dev, node);
        err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
                                 2 * PAGE_SIZE);
-       set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
+       set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
        if (err) {
                en_err(priv, "Failed allocating hwq resources\n");
                goto err_bounce;
@@ -682,8 +682,8 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
        if (dev->num_tc)
                return skb_tx_hash(dev, skb);
 
-       if (vlan_tx_tag_present(skb))
-               up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
+       if (skb_vlan_tag_present(skb))
+               up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
 
        return fallback(dev, skb) % rings_p_up + up * rings_p_up;
 }
@@ -742,8 +742,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_drop;
        }
 
-       if (vlan_tx_tag_present(skb))
-               vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb))
+               vlan_tag = skb_vlan_tag_get(skb);
 
 
        netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -930,7 +930,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        real_size = (real_size / 16) & 0x3f;
 
        if (ring->bf_enabled && desc_size <= MAX_BF && !bounce &&
-           !vlan_tx_tag_present(skb) && send_doorbell) {
+           !skb_vlan_tag_present(skb) && send_doorbell) {
                tx_desc->ctrl.bf_qpn = ring->doorbell_qpn |
                                       cpu_to_be32(real_size);
 
@@ -952,7 +952,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        } else {
                tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
                tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
-                       !!vlan_tx_tag_present(skb);
+                       !!skb_vlan_tag_present(skb);
                tx_desc->ctrl.fence_size = real_size;
 
                /* Ensure new descriptor hits memory
index 3d275fbaf0eb05df05159a06fed22812469b6200..264bc15c1ff212ad649c3547c8a8ffc0b2c22e43 100644 (file)
@@ -88,6 +88,8 @@ static u64 get_async_ev_mask(struct mlx4_dev *dev)
        u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
                async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
+       if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
+               async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);
 
        return async_ev_mask;
 }
@@ -237,7 +239,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
        struct mlx4_eqe eqe;
 
        /*don't send if we don't have the that slave */
-       if (dev->num_vfs < slave)
+       if (dev->persist->num_vfs < slave)
                return 0;
        memset(&eqe, 0, sizeof eqe);
 
@@ -255,7 +257,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
        struct mlx4_eqe eqe;
 
        /*don't send if we don't have the that slave */
-       if (dev->num_vfs < slave)
+       if (dev->persist->num_vfs < slave)
                return 0;
        memset(&eqe, 0, sizeof eqe);
 
@@ -310,7 +312,7 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
        struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
                                                                          port);
 
-       for (i = 0; i < dev->num_vfs + 1; i++)
+       for (i = 0; i < dev->persist->num_vfs + 1; i++)
                if (test_bit(i, slaves_pport.slaves))
                        set_and_calc_slave_port_state(dev, i, port,
                                                      event, &gen_event);
@@ -429,8 +431,14 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
                if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
                        mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
                                 i);
-
-                       mlx4_delete_all_resources_for_slave(dev, i);
+                       /* In case of 'Reset flow' FLR can be generated for
+                        * a slave before mlx4_load_one is done.
+                        * make sure interface is up before trying to delete
+                        * slave resources which weren't allocated yet.
+                        */
+                       if (dev->persist->interface_state &
+                           MLX4_INTERFACE_STATE_UP)
+                               mlx4_delete_all_resources_for_slave(dev, i);
                        /*return the slave to running mode*/
                        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
                        slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
@@ -560,7 +568,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
                                if (!mlx4_is_master(dev))
                                        break;
-                               for (i = 0; i < dev->num_vfs + 1; i++) {
+                               for (i = 0; i < dev->persist->num_vfs + 1;
+                                    i++) {
                                        if (!test_bit(i, slaves_port.slaves))
                                                continue;
                                        if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
@@ -596,7 +605,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                if (!mlx4_is_master(dev))
                                        break;
                                if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
-                                       for (i = 0; i < dev->num_vfs + 1; i++) {
+                                       for (i = 0;
+                                            i < dev->persist->num_vfs + 1;
+                                            i++) {
                                                if (!test_bit(i, slaves_port.slaves))
                                                        continue;
                                                if (i == mlx4_master_func_num(dev))
@@ -727,6 +738,26 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                            (unsigned long) eqe);
                        break;
 
+               case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
+                       switch (eqe->subtype) {
+                       case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
+                               mlx4_warn(dev, "Bad cable detected on port %u\n",
+                                         eqe->event.bad_cable.port);
+                               break;
+                       case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
+                               mlx4_warn(dev, "Unsupported cable detected\n");
+                               break;
+                       default:
+                               mlx4_dbg(dev,
+                                        "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
+                                        eqe->type, eqe->subtype, eq->eqn,
+                                        eq->cons_index, eqe->owner, eq->nent,
+                                        !!(eqe->owner & 0x80) ^
+                                        !!(eq->cons_index & eq->nent) ? "HW" : "SW");
+                               break;
+                       }
+                       break;
+
                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
@@ -837,12 +868,10 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                        MLX4_CMD_WRAPPED);
 }
 
-static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                        int eq_num)
+static int mlx4_HW2SW_EQ(struct mlx4_dev *dev,  int eq_num)
 {
-       return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
-                           0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
-                           MLX4_CMD_WRAPPED);
+       return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -865,7 +894,7 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
        if (!priv->eq_table.uar_map[index]) {
                priv->eq_table.uar_map[index] =
-                       ioremap(pci_resource_start(dev->pdev, 2) +
+                       ioremap(pci_resource_start(dev->persist->pdev, 2) +
                                ((eq->eqn / 4) << PAGE_SHIFT),
                                PAGE_SIZE);
                if (!priv->eq_table.uar_map[index]) {
@@ -928,8 +957,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
        eq_context = mailbox->buf;
 
        for (i = 0; i < npages; ++i) {
-               eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
-                                                         PAGE_SIZE, &t, GFP_KERNEL);
+               eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
+                                                         pdev->dev,
+                                                         PAGE_SIZE, &t,
+                                                         GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;
 
@@ -995,7 +1026,7 @@ err_out_free_eq:
 err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
-                       dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                       dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          eq->page_list[i].map);
 
@@ -1013,7 +1044,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
                         struct mlx4_eq *eq)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_cmd_mailbox *mailbox;
        int err;
        int i;
        /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
@@ -1021,36 +1051,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
         */
        int npages = PAGE_ALIGN(dev->caps.eqe_size  * eq->nent) / PAGE_SIZE;
 
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return;
-
-       err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
+       err = mlx4_HW2SW_EQ(dev, eq->eqn);
        if (err)
                mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
 
-       if (0) {
-               mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
-               for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
-                       if (i % 4 == 0)
-                               pr_cont("[%02x] ", i * 4);
-                       pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
-                       if ((i + 1) % 4 == 0)
-                               pr_cont("\n");
-               }
-       }
        synchronize_irq(eq->irq);
        tasklet_disable(&eq->tasklet_ctx.task);
 
        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
-               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                   eq->page_list[i].buf,
-                                   eq->page_list[i].map);
+               dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
+                                 eq->page_list[i].buf,
+                                 eq->page_list[i].map);
 
        kfree(eq->page_list);
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
-       mlx4_free_cmd_mailbox(dev, mailbox);
 }
 
 static void mlx4_free_irqs(struct mlx4_dev *dev)
@@ -1060,7 +1075,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
        int     i, vec;
 
        if (eq_table->have_irq)
-               free_irq(dev->pdev->irq, dev);
+               free_irq(dev->persist->pdev->irq, dev);
 
        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
@@ -1089,7 +1104,8 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
 
-       priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
+       priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
+                                priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
                mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
@@ -1212,13 +1228,13 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-comp-%d@pci:%s", i,
-                                        pci_name(dev->pdev));
+                                        pci_name(dev->persist->pdev));
                        } else {
                                snprintf(priv->eq_table.irq_names +
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-async@pci:%s",
-                                        pci_name(dev->pdev));
+                                        pci_name(dev->persist->pdev));
                        }
 
                        eq_name = priv->eq_table.irq_names +
@@ -1235,8 +1251,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                snprintf(priv->eq_table.irq_names,
                         MLX4_IRQNAME_SIZE,
                         DRV_NAME "@pci:%s",
-                        pci_name(dev->pdev));
-               err = request_irq(dev->pdev->irq, mlx4_interrupt,
+                        pci_name(dev->persist->pdev));
+               err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
                        goto err_out_async;
index 982861d1df44d706cba4884bfb1caa19fe9f8f11..5a21e5dc94cbae7f8c35d989aba039afcb5c4f77 100644 (file)
@@ -84,13 +84,10 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
                [ 1] = "UC transport",
                [ 2] = "UD transport",
                [ 3] = "XRC transport",
-               [ 4] = "reliable multicast",
-               [ 5] = "FCoIB support",
                [ 6] = "SRQ support",
                [ 7] = "IPoIB checksum offload",
                [ 8] = "P_Key violation counter",
                [ 9] = "Q_Key violation counter",
-               [10] = "VMM",
                [12] = "Dual Port Different Protocol (DPDP) support",
                [15] = "Big LSO headers",
                [16] = "MW support",
@@ -99,12 +96,11 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
                [19] = "Raw multicast support",
                [20] = "Address vector port checking support",
                [21] = "UD multicast support",
-               [24] = "Demand paging support",
-               [25] = "Router support",
                [30] = "IBoE support",
                [32] = "Unicast loopback support",
                [34] = "FCS header control",
-               [38] = "Wake On LAN support",
+               [37] = "Wake On LAN (port1) support",
+               [38] = "Wake On LAN (port2) support",
                [40] = "UDP RSS support",
                [41] = "Unicast VEP steering support",
                [42] = "Multicast VEP steering support",
@@ -145,7 +141,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [16] = "CONFIG DEV support",
                [17] = "Asymmetric EQs support",
                [18] = "More than 80 VFs support",
-               [19] = "Performance optimized for limited rule configuration flow steering support"
+               [19] = "Performance optimized for limited rule configuration flow steering support",
+               [20] = "Recoverable error events support",
+               [21] = "Port Remap support"
        };
        int i;
 
@@ -259,6 +257,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP    0x28
 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET           0x2c
 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET      0x30
+#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET     0x48
 
 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET         0x50
 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET         0x54
@@ -273,6 +272,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_FLAG_RDMA               0x40
 #define QUERY_FUNC_CAP_FLAG_ETH                        0x80
 #define QUERY_FUNC_CAP_FLAG_QUOTAS             0x10
+#define QUERY_FUNC_CAP_FLAG_RESD_LKEY          0x08
 #define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX      0x04
 
 #define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG    (1UL << 31)
@@ -344,9 +344,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
        } else if (vhcr->op_modifier == 0) {
                struct mlx4_active_ports actv_ports =
                        mlx4_get_active_ports(dev, slave);
-               /* enable rdma and ethernet interfaces, and new quota locations */
+               /* enable rdma and ethernet interfaces, new quota locations,
+                * and reserved lkey
+                */
                field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
-                        QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX);
+                        QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
+                        QUERY_FUNC_CAP_FLAG_RESD_LKEY);
                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
 
                field = min(
@@ -411,6 +414,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
                size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
                        QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
                MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
+
+               size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
        } else
                err = -EINVAL;
 
@@ -503,6 +509,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
                MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
                func_cap->reserved_eq = size & 0xFFFFFF;
 
+               if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
+                       MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
+                       func_cap->reserved_lkey = size;
+               } else {
+                       func_cap->reserved_lkey = 0;
+               }
+
                func_cap->extra_flags = 0;
 
                /* Mailbox data from 0x6c and onward should only be treated if
@@ -851,6 +864,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
        MLX4_GET(dev_cap->bmme_flags, outbox,
                 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+       if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
        if (field & 0x20)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
@@ -859,6 +874,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
        if (field32 & (1 << 0))
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
+       if (field32 & (1 << 7))
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
        if (field & 1<<6)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1106,9 +1123,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        field &= 0x7f;
        MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
 
-       /* For guests, disable mw type 2 */
+       /* For guests, disable mw type 2 and port remap*/
        MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
        bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
+       bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
        MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
 
        /* turn off device-managed steering capability if not enabled */
@@ -1562,6 +1580,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_VXLAN_OFFSET           0x0c
 #define INIT_HCA_CACHELINE_SZ_OFFSET    0x0e
 #define INIT_HCA_FLAGS_OFFSET           0x014
+#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
 #define INIT_HCA_QPC_OFFSET             0x020
 #define         INIT_HCA_QPC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x10)
 #define         INIT_HCA_LOG_QP_OFFSET          (INIT_HCA_QPC_OFFSET + 0x17)
@@ -1668,6 +1687,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
                dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
        }
 
+       if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
+               *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
+
        /* QPC/EEC/CQC/EQC/RDMARC attributes */
 
        MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
@@ -1752,8 +1774,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
                MLX4_PUT(inbox, parser_params,  INIT_HCA_VXLAN_OFFSET);
        }
 
-       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
-                      MLX4_CMD_NATIVE);
+       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
+                      MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 
        if (err)
                mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -1879,6 +1901,36 @@ out:
        return err;
 }
 
+static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       __be32 *outbox;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
+               return PTR_ERR(mailbox);
+       }
+       outbox = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+                          MLX4_CMD_QUERY_HCA,
+                          MLX4_CMD_TIME_CLASS_B,
+                          !mlx4_is_slave(dev));
+       if (err) {
+               mlx4_warn(dev, "hca_core_clock update failed\n");
+               goto out;
+       }
+
+       MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+
 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
  * and real QP0 are active, so that the paravirtualized QP0 is ready
  * to operate */
@@ -1983,6 +2035,9 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
                err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
+       if (!err)
+               mlx4_hca_core_clock_update(dev);
+
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
@@ -2007,7 +2062,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
                if (priv->mfunc.master.init_port_ref[port] == 1) {
                        err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
-                                      1000, MLX4_CMD_NATIVE);
+                                      MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
                        if (err)
                                return err;
                }
@@ -2018,7 +2073,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
                        if (!priv->mfunc.master.qp0_state[port].qp0_active &&
                            priv->mfunc.master.qp0_state[port].port_active) {
                                err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
-                                              1000, MLX4_CMD_NATIVE);
+                                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
                                if (err)
                                        return err;
                                priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
@@ -2033,15 +2088,15 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
 {
-       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
-                       MLX4_CMD_WRAPPED);
+       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
 
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
 {
-       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
-                       MLX4_CMD_NATIVE);
+       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
+                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 }
 
 struct mlx4_config_dev {
@@ -2049,13 +2104,16 @@ struct mlx4_config_dev {
        __be32  rsvd1[3];
        __be16  vxlan_udp_dport;
        __be16  rsvd2;
-       __be32  rsvd3[27];
-       __be16  rsvd4;
-       u8      rsvd5;
+       __be32  rsvd3;
+       __be32  roce_flags;
+       __be32  rsvd4[25];
+       __be16  rsvd5;
+       u8      rsvd6;
        u8      rx_checksum_val;
 };
 
 #define MLX4_VXLAN_UDP_DPORT (1 << 0)
+#define MLX4_DISABLE_RX_PORT BIT(18)
 
 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
 {
@@ -2111,7 +2169,7 @@ static const u8 config_dev_csum_flags[] = {
 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
                              struct mlx4_config_dev_params *params)
 {
-       struct mlx4_config_dev config_dev;
+       struct mlx4_config_dev config_dev = {0};
        int err;
        u8 csum_mask;
 
@@ -2158,6 +2216,45 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
 }
 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
 
+#define CONFIG_DISABLE_RX_PORT BIT(15)
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
+{
+       struct mlx4_config_dev config_dev;
+
+       memset(&config_dev, 0, sizeof(config_dev));
+       config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
+       if (dis)
+               config_dev.roce_flags =
+                       cpu_to_be32(CONFIG_DISABLE_RX_PORT);
+
+       return mlx4_CONFIG_DEV_set(dev, &config_dev);
+}
+
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct {
+               __be32 v_port1;
+               __be32 v_port2;
+       } *v2p;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return -ENOMEM;
+
+       v2p = mailbox->buf;
+       v2p->v_port1 = cpu_to_be32(port1);
+       v2p->v_port2 = cpu_to_be32(port2);
+
+       err = mlx4_cmd(dev, mailbox->dma, 0,
+                      MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
 
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 {
@@ -2180,7 +2277,8 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 int mlx4_NOP(struct mlx4_dev *dev)
 {
        /* Input modifier of 0x1f means "finish as soon as possible." */
-       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
+       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_NATIVE);
 }
 
 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
index 62562b60fa876e5577e8940d2bb5d986eacbf565..f44f7f6017ed589f5c8a184666b82e00f37f47a8 100644 (file)
@@ -147,6 +147,7 @@ struct mlx4_func_cap {
        u32     qp0_proxy_qpn;
        u32     qp1_tunnel_qpn;
        u32     qp1_proxy_qpn;
+       u32     reserved_lkey;
        u8      physical_port;
        u8      port_flags;
        u8      flags1;
index 97c9b1db1d275ee15d7fd1a98937032a329db03b..2a9dd460a95f8149d884af048dc651eaaec4904f 100644 (file)
@@ -56,7 +56,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
        int i;
 
        if (chunk->nsg > 0)
-               pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+               pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);
 
        for (i = 0; i < chunk->npages; ++i)
@@ -69,7 +69,8 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
        int i;
 
        for (i = 0; i < chunk->npages; ++i)
-               dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+               dma_free_coherent(&dev->persist->pdev->dev,
+                                 chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
 }
@@ -173,7 +174,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                        --cur_order;
 
                if (coherent)
-                       ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
+                       ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
                                                      &chunk->mem[chunk->npages],
                                                      cur_order, gfp_mask);
                else
@@ -193,7 +194,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                if (coherent)
                        ++chunk->nsg;
                else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
-                       chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+                       chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
                                                chunk->npages,
                                                PCI_DMA_BIDIRECTIONAL);
 
@@ -208,7 +209,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
        }
 
        if (!coherent && chunk) {
-               chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+               chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);
 
index 116895ac8b353afa4f461564ae8bac8529803be2..6fce58718837202bd82739dd8592b753ece7ef42 100644 (file)
 
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/errno.h>
 
 #include "mlx4.h"
 
 struct mlx4_device_context {
        struct list_head        list;
+       struct list_head        bond_list;
        struct mlx4_interface  *intf;
        void                   *context;
 };
@@ -115,6 +117,58 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
 
+int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
+       unsigned long flags;
+       int ret;
+       LIST_HEAD(bond_list);
+
+       if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
+               return -ENOTSUPP;
+
+       ret = mlx4_disable_rx_port_check(dev, enable);
+       if (ret) {
+               mlx4_err(dev, "Fail to %s rx port check\n",
+                        enable ? "enable" : "disable");
+               return ret;
+       }
+       if (enable) {
+               dev->flags |= MLX4_FLAG_BONDED;
+       } else {
+                ret = mlx4_virt2phy_port_map(dev, 1, 2);
+               if (ret) {
+                       mlx4_err(dev, "Fail to reset port map\n");
+                       return ret;
+               }
+               dev->flags &= ~MLX4_FLAG_BONDED;
+       }
+
+       spin_lock_irqsave(&priv->ctx_lock, flags);
+       list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
+               if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
+                       list_add_tail(&dev_ctx->bond_list, &bond_list);
+                       list_del(&dev_ctx->list);
+               }
+       }
+       spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+       list_for_each_entry(dev_ctx, &bond_list, bond_list) {
+               dev_ctx->intf->remove(dev, dev_ctx->context);
+               dev_ctx->context =  dev_ctx->intf->add(dev);
+
+               spin_lock_irqsave(&priv->ctx_lock, flags);
+               list_add_tail(&dev_ctx->list, &priv->ctx_list);
+               spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+               mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n",
+                        dev_ctx->intf->protocol, enable ?
+                        "enabled" : "disabled");
+       }
+       return 0;
+}
+
 void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
                         unsigned long param)
 {
@@ -138,13 +192,13 @@ int mlx4_register_device(struct mlx4_dev *dev)
 
        mutex_lock(&intf_mutex);
 
+       dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
        list_add_tail(&priv->dev_list, &dev_list);
        list_for_each_entry(intf, &intf_list, list)
                mlx4_add_device(intf, priv);
 
        mutex_unlock(&intf_mutex);
-       if (!mlx4_is_slave(dev))
-               mlx4_start_catas_poll(dev);
+       mlx4_start_catas_poll(dev);
 
        return 0;
 }
@@ -154,14 +208,14 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_interface *intf;
 
-       if (!mlx4_is_slave(dev))
-               mlx4_stop_catas_poll(dev);
+       mlx4_stop_catas_poll(dev);
        mutex_lock(&intf_mutex);
 
        list_for_each_entry(intf, &intf_list, list)
                mlx4_remove_device(intf, priv);
 
        list_del(&priv->dev_list);
+       dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;
 
        mutex_unlock(&intf_mutex);
 }
index 943cbd47d832bb98719e355a727e8451c68bfbbe..7e487223489a467071155f0e67ea052ba2b18949 100644 (file)
@@ -108,6 +108,8 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe,
                                         MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
                                         MLX4_FUNC_CAP_DMFS_A0_STATIC)
 
+#define RESET_PERSIST_MASK_FLAGS       (MLX4_FLAG_SRIOV)
+
 static char mlx4_version[] =
        DRV_NAME ": Mellanox ConnectX core driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -249,7 +251,8 @@ static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
                if (mlx4_is_master(dev))
                        dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
        } else {
-               mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n");
+               if (cache_line_size() != 32  && cache_line_size() != 64)
+                       mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
                dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
                dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
        }
@@ -318,10 +321,11 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                return -ENODEV;
        }
 
-       if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
+       if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
                mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
                         dev_cap->uar_size,
-                        (unsigned long long) pci_resource_len(dev->pdev, 2));
+                        (unsigned long long)
+                        pci_resource_len(dev->persist->pdev, 2));
                return -ENODEV;
        }
 
@@ -541,8 +545,10 @@ static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
        *speed = PCI_SPEED_UNKNOWN;
        *width = PCIE_LNK_WIDTH_UNKNOWN;
 
-       err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
-       err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
+       err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
+                                         &lnkcap1);
+       err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
+                                         &lnkcap2);
        if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
                        *speed = PCIE_SPEED_8_0GT;
@@ -587,7 +593,7 @@ static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
                return;
        }
 
-       err = pcie_get_minimum_link(dev->pdev, &speed, &width);
+       err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
        if (err || speed == PCI_SPEED_UNKNOWN ||
            width == PCIE_LNK_WIDTH_UNKNOWN) {
                mlx4_warn(dev,
@@ -792,6 +798,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        dev->caps.num_mpts              = 1 << hca_param.log_mpt_sz;
        dev->caps.num_eqs               = func_cap.max_eq;
        dev->caps.reserved_eqs          = func_cap.reserved_eq;
+       dev->caps.reserved_lkey         = func_cap.reserved_lkey;
        dev->caps.num_pds               = MLX4_NUM_PDS;
        dev->caps.num_mgms              = 0;
        dev->caps.num_amgms             = 0;
@@ -837,10 +844,12 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 
        if (dev->caps.uar_page_size * (dev->caps.num_uars -
                                       dev->caps.reserved_uars) >
-                                      pci_resource_len(dev->pdev, 2)) {
+                                      pci_resource_len(dev->persist->pdev,
+                                                       2)) {
                mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
                         dev->caps.uar_page_size * dev->caps.num_uars,
-                        (unsigned long long) pci_resource_len(dev->pdev, 2));
+                        (unsigned long long)
+                        pci_resource_len(dev->persist->pdev, 2));
                goto err_mem;
        }
 
@@ -1152,6 +1161,91 @@ err_set_port:
        return err ? err : count;
 }
 
+int mlx4_bond(struct mlx4_dev *dev)
+{
+       int ret = 0;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mutex_lock(&priv->bond_mutex);
+
+       if (!mlx4_is_bonded(dev))
+               ret = mlx4_do_bond(dev, true);
+       else
+               ret = 0;
+
+       mutex_unlock(&priv->bond_mutex);
+       if (ret)
+               mlx4_err(dev, "Failed to bond device: %d\n", ret);
+       else
+               mlx4_dbg(dev, "Device is bonded\n");
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_bond);
+
+int mlx4_unbond(struct mlx4_dev *dev)
+{
+       int ret = 0;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mutex_lock(&priv->bond_mutex);
+
+       if (mlx4_is_bonded(dev))
+               ret = mlx4_do_bond(dev, false);
+
+       mutex_unlock(&priv->bond_mutex);
+       if (ret)
+               mlx4_err(dev, "Failed to unbond device: %d\n", ret);
+       else
+               mlx4_dbg(dev, "Device is unbonded\n");
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_unbond);
+
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
+{
+       u8 port1 = v2p->port1;
+       u8 port2 = v2p->port2;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err;
+
+       if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
+               return -ENOTSUPP;
+
+       mutex_lock(&priv->bond_mutex);
+
+       /* zero means keep current mapping for this port */
+       if (port1 == 0)
+               port1 = priv->v2p.port1;
+       if (port2 == 0)
+               port2 = priv->v2p.port2;
+
+       if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
+           (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
+           (port1 == 2 && port2 == 1)) {
+               /* besides boundary checks cross mapping makes
+                * no sense and therefore not allowed */
+               err = -EINVAL;
+       } else if ((port1 == priv->v2p.port1) &&
+                (port2 == priv->v2p.port2)) {
+               err = 0;
+       } else {
+               err = mlx4_virt2phy_port_map(dev, port1, port2);
+               if (!err) {
+                       mlx4_dbg(dev, "port map changed: [%d][%d]\n",
+                                port1, port2);
+                       priv->v2p.port1 = port1;
+                       priv->v2p.port2 = port2;
+               } else {
+                       mlx4_err(dev, "Failed to change port mape: %d\n", err);
+               }
+       }
+
+       mutex_unlock(&priv->bond_mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_port_map_set);
+
 static int mlx4_load_fw(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1477,7 +1571,8 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
 
        mutex_lock(&priv->cmd.slave_cmd_mutex);
-       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
+                         MLX4_COMM_TIME))
                mlx4_warn(dev, "Failed to close slave function\n");
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
 }
@@ -1492,9 +1587,9 @@ static int map_bf_area(struct mlx4_dev *dev)
        if (!dev->caps.bf_reg_size)
                return -ENXIO;
 
-       bf_start = pci_resource_start(dev->pdev, 2) +
+       bf_start = pci_resource_start(dev->persist->pdev, 2) +
                        (dev->caps.num_uars << PAGE_SHIFT);
-       bf_len = pci_resource_len(dev->pdev, 2) -
+       bf_len = pci_resource_len(dev->persist->pdev, 2) -
                        (dev->caps.num_uars << PAGE_SHIFT);
        priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
        if (!priv->bf_mapping)
@@ -1536,7 +1631,8 @@ static int map_internal_clock(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
 
        priv->clock_mapping =
-               ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
+               ioremap(pci_resource_start(dev->persist->pdev,
+                                          priv->fw.clock_bar) +
                        priv->fw.clock_offset, MLX4_CLOCK_SIZE);
 
        if (!priv->clock_mapping)
@@ -1573,6 +1669,50 @@ static void mlx4_close_fw(struct mlx4_dev *dev)
        }
 }
 
+static int mlx4_comm_check_offline(struct mlx4_dev *dev)
+{
+#define COMM_CHAN_OFFLINE_OFFSET 0x09
+
+       u32 comm_flags;
+       u32 offline_bit;
+       unsigned long end;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
+       while (time_before(jiffies, end)) {
+               comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
+                                         MLX4_COMM_CHAN_FLAGS));
+               offline_bit = (comm_flags &
+                              (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
+               if (!offline_bit)
+                       return 0;
+               /* There are cases as part of AER/Reset flow that PF needs
+                * around 100 msec to load. We therefore sleep for 100 msec
+                * to allow other tasks to make use of that CPU during this
+                * time interval.
+                */
+               msleep(100);
+       }
+       mlx4_err(dev, "Communication channel is offline.\n");
+       return -EIO;
+}
+
+static void mlx4_reset_vf_support(struct mlx4_dev *dev)
+{
+#define COMM_CHAN_RST_OFFSET 0x1e
+
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u32 comm_rst;
+       u32 comm_caps;
+
+       comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
+                                MLX4_COMM_CHAN_CAPS));
+       comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
+
+       if (comm_rst)
+               dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
+}
+
 static int mlx4_init_slave(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1588,9 +1728,15 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
 
        mutex_lock(&priv->cmd.slave_cmd_mutex);
        priv->cmd.max_cmds = 1;
+       if (mlx4_comm_check_offline(dev)) {
+               mlx4_err(dev, "PF is not responsive, skipping initialization\n");
+               goto err_offline;
+       }
+
+       mlx4_reset_vf_support(dev);
        mlx4_warn(dev, "Sending reset\n");
        ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
-                                      MLX4_COMM_TIME);
+                                      MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
        /* if we are in the middle of flr the slave will try
         * NUM_OF_RESET_RETRIES times before leaving.*/
        if (ret_from_reset) {
@@ -1615,22 +1761,24 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
 
        mlx4_warn(dev, "Sending vhcr0\n");
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
-                                                   MLX4_COMM_TIME))
+                            MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
                goto err;
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
-                                                   MLX4_COMM_TIME))
+                            MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
                goto err;
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
-                                                   MLX4_COMM_TIME))
+                            MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
                goto err;
-       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
+                         MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
                goto err;
 
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
        return 0;
 
 err:
-       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
+err_offline:
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
        return -EIO;
 }
@@ -1705,7 +1853,8 @@ static void choose_steering_mode(struct mlx4_dev *dev,
        if (mlx4_log_num_mgm_entry_size <= 0 &&
            dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
            (!mlx4_is_mfunc(dev) ||
-            (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
+            (dev_cap->fs_max_num_qp_per_entry >=
+            (dev->persist->num_vfs + 1))) &&
            choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
                MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
                dev->oper_log_mgm_entry_size =
@@ -1744,8 +1893,7 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
                                       struct mlx4_dev_cap *dev_cap)
 {
        if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
-           dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS &&
-           dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
+           dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
                dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
        else
                dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
@@ -1829,7 +1977,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                err = mlx4_dev_cap(dev, &dev_cap);
                if (err) {
                        mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
-                       goto err_stop_fw;
+                       return err;
                }
 
                choose_steering_mode(dev, &dev_cap);
@@ -1860,7 +2008,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                                             &init_hca);
                if ((long long) icm_size < 0) {
                        err = icm_size;
-                       goto err_stop_fw;
+                       return err;
                }
 
                dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
@@ -1874,7 +2022,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
                err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
                if (err)
-                       goto err_stop_fw;
+                       return err;
 
                err = mlx4_INIT_HCA(dev, &init_hca);
                if (err) {
@@ -1886,7 +2034,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                        err = mlx4_query_func(dev, &dev_cap);
                        if (err < 0) {
                                mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
-                               goto err_stop_fw;
+                               goto err_close;
                        } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
                                dev->caps.num_eqs = dev_cap.max_eqs;
                                dev->caps.reserved_eqs = dev_cap.reserved_eqs;
@@ -2006,11 +2154,6 @@ err_free_icm:
        if (!mlx4_is_slave(dev))
                mlx4_free_icms(dev);
 
-err_stop_fw:
-       if (!mlx4_is_slave(dev)) {
-               mlx4_UNMAP_FA(dev);
-               mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-       }
        return err;
 }
 
@@ -2293,7 +2436,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                for (i = 0; i < nreq; ++i)
                        entries[i].entry = i;
 
-               nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
+               nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
+                                            nreq);
 
                if (nreq < 0) {
                        kfree(entries);
@@ -2321,7 +2465,7 @@ no_msi:
        dev->caps.comp_pool        = 0;
 
        for (i = 0; i < 2; ++i)
-               priv->eq_table.eq[i].irq = dev->pdev->irq;
+               priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
 }
 
 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2349,7 +2493,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
        info->port_attr.show      = show_port_type;
        sysfs_attr_init(&info->port_attr.attr);
 
-       err = device_create_file(&dev->pdev->dev, &info->port_attr);
+       err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
        if (err) {
                mlx4_err(dev, "Failed to create file for port %d\n", port);
                info->port = -1;
@@ -2366,10 +2510,12 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
        info->port_mtu_attr.show      = show_port_ib_mtu;
        sysfs_attr_init(&info->port_mtu_attr.attr);
 
-       err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
+       err = device_create_file(&dev->persist->pdev->dev,
+                                &info->port_mtu_attr);
        if (err) {
                mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
-               device_remove_file(&info->dev->pdev->dev, &info->port_attr);
+               device_remove_file(&info->dev->persist->pdev->dev,
+                                  &info->port_attr);
                info->port = -1;
        }
 
@@ -2381,8 +2527,9 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
        if (info->port < 0)
                return;
 
-       device_remove_file(&info->dev->pdev->dev, &info->port_attr);
-       device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
+       device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
+       device_remove_file(&info->dev->persist->pdev->dev,
+                          &info->port_mtu_attr);
 }
 
 static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2449,10 +2596,11 @@ static int mlx4_get_ownership(struct mlx4_dev *dev)
        void __iomem *owner;
        u32 ret;
 
-       if (pci_channel_offline(dev->pdev))
+       if (pci_channel_offline(dev->persist->pdev))
                return -EIO;
 
-       owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+       owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
+                       MLX4_OWNER_BASE,
                        MLX4_OWNER_SIZE);
        if (!owner) {
                mlx4_err(dev, "Failed to obtain ownership bit\n");
@@ -2468,10 +2616,11 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
 {
        void __iomem *owner;
 
-       if (pci_channel_offline(dev->pdev))
+       if (pci_channel_offline(dev->persist->pdev))
                return;
 
-       owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+       owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
+                       MLX4_OWNER_BASE,
                        MLX4_OWNER_SIZE);
        if (!owner) {
                mlx4_err(dev, "Failed to obtain ownership bit\n");
@@ -2486,11 +2635,19 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
                                  !!((flags) & MLX4_FLAG_MASTER))
 
 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
-                            u8 total_vfs, int existing_vfs)
+                            u8 total_vfs, int existing_vfs, int reset_flow)
 {
        u64 dev_flags = dev->flags;
        int err = 0;
 
+       if (reset_flow) {
+               dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
+                                      GFP_KERNEL);
+               if (!dev->dev_vfs)
+                       goto free_mem;
+               return dev_flags;
+       }
+
        atomic_inc(&pf_loading);
        if (dev->flags &  MLX4_FLAG_SRIOV) {
                if (existing_vfs != total_vfs) {
@@ -2519,13 +2676,14 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
                dev_flags |= MLX4_FLAG_SRIOV |
                        MLX4_FLAG_MASTER;
                dev_flags &= ~MLX4_FLAG_SLAVE;
-               dev->num_vfs = total_vfs;
+               dev->persist->num_vfs = total_vfs;
        }
        return dev_flags;
 
 disable_sriov:
        atomic_dec(&pf_loading);
-       dev->num_vfs = 0;
+free_mem:
+       dev->persist->num_vfs = 0;
        kfree(dev->dev_vfs);
        return dev_flags & ~MLX4_FLAG_MASTER;
 }
@@ -2549,7 +2707,8 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
 }
 
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
-                        int total_vfs, int *nvfs, struct mlx4_priv *priv)
+                        int total_vfs, int *nvfs, struct mlx4_priv *priv,
+                        int reset_flow)
 {
        struct mlx4_dev *dev;
        unsigned sum = 0;
@@ -2565,6 +2724,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
        spin_lock_init(&priv->ctx_lock);
 
        mutex_init(&priv->port_mutex);
+       mutex_init(&priv->bond_mutex);
 
        INIT_LIST_HEAD(&priv->pgdir_list);
        mutex_init(&priv->pgdir_mutex);
@@ -2612,10 +2772,15 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
                        existing_vfs = pci_num_vf(pdev);
                        if (existing_vfs)
                                dev->flags |= MLX4_FLAG_SRIOV;
-                       dev->num_vfs = total_vfs;
+                       dev->persist->num_vfs = total_vfs;
                }
        }
 
+       /* on load remove any previous indication of internal error,
+        * device is up.
+        */
+       dev->persist->state = MLX4_DEVICE_STATE_UP;
+
 slave_start:
        err = mlx4_cmd_init(dev);
        if (err) {
@@ -2666,8 +2831,10 @@ slave_start:
                                goto err_fw;
 
                        if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
-                               u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
-                                                                 existing_vfs);
+                               u64 dev_flags = mlx4_enable_sriov(dev, pdev,
+                                                                 total_vfs,
+                                                                 existing_vfs,
+                                                                 reset_flow);
 
                                mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
                                dev->flags = dev_flags;
@@ -2709,7 +2876,7 @@ slave_start:
                        if (dev->flags & MLX4_FLAG_SRIOV) {
                                if (!existing_vfs)
                                        pci_disable_sriov(pdev);
-                               if (mlx4_is_master(dev))
+                               if (mlx4_is_master(dev) && !reset_flow)
                                        atomic_dec(&pf_loading);
                                dev->flags &= ~MLX4_FLAG_SRIOV;
                        }
@@ -2723,7 +2890,8 @@ slave_start:
        }
 
        if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
-               u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);
+               u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
+                                                 existing_vfs, reset_flow);
 
                if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
                        mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
@@ -2776,12 +2944,14 @@ slave_start:
                                 dev->caps.num_ports);
                        goto err_close;
                }
-               memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));
+               memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
 
-               for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
+               for (i = 0;
+                    i < sizeof(dev->persist->nvfs)/
+                    sizeof(dev->persist->nvfs[0]); i++) {
                        unsigned j;
 
-                       for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
+                       for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
                                dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
                                dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
                                        dev->caps.num_ports;
@@ -2833,6 +3003,17 @@ slave_start:
                goto err_steer;
 
        mlx4_init_quotas(dev);
+       /* When PF resources are ready arm its comm channel to enable
+        * getting commands
+        */
+       if (mlx4_is_master(dev)) {
+               err = mlx4_ARM_COMM_CHANNEL(dev);
+               if (err) {
+                       mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
+                                err);
+                       goto err_steer;
+               }
+       }
 
        for (port = 1; port <= dev->caps.num_ports; port++) {
                err = mlx4_init_port_info(dev, port);
@@ -2840,6 +3021,9 @@ slave_start:
                        goto err_port;
        }
 
+       priv->v2p.port1 = 1;
+       priv->v2p.port2 = 2;
+
        err = mlx4_register_device(dev);
        if (err)
                goto err_port;
@@ -2851,7 +3035,7 @@ slave_start:
 
        priv->removed = 0;
 
-       if (mlx4_is_master(dev) && dev->num_vfs)
+       if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
                atomic_dec(&pf_loading);
 
        kfree(dev_cap);
@@ -2885,8 +3069,10 @@ err_free_eq:
        mlx4_free_eq_table(dev);
 
 err_master_mfunc:
-       if (mlx4_is_master(dev))
+       if (mlx4_is_master(dev)) {
+               mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
                mlx4_multi_func_cleanup(dev);
+       }
 
        if (mlx4_is_slave(dev)) {
                kfree(dev->caps.qp0_qkey);
@@ -2910,10 +3096,12 @@ err_cmd:
        mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
 
 err_sriov:
-       if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
+       if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
                pci_disable_sriov(pdev);
+               dev->flags &= ~MLX4_FLAG_SRIOV;
+       }
 
-       if (mlx4_is_master(dev) && dev->num_vfs)
+       if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
                atomic_dec(&pf_loading);
 
        kfree(priv->dev.dev_vfs);
@@ -3054,11 +3242,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
                }
        }
 
-       err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
+       err = mlx4_catas_init(&priv->dev);
        if (err)
                goto err_release_regions;
+
+       err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
+       if (err)
+               goto err_catas;
+
        return 0;
 
+err_catas:
+       mlx4_catas_end(&priv->dev);
+
 err_release_regions:
        pci_release_regions(pdev);
 
@@ -3081,38 +3277,60 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                return -ENOMEM;
 
        dev       = &priv->dev;
-       dev->pdev = pdev;
-       pci_set_drvdata(pdev, dev);
+       dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
+       if (!dev->persist) {
+               kfree(priv);
+               return -ENOMEM;
+       }
+       dev->persist->pdev = pdev;
+       dev->persist->dev = dev;
+       pci_set_drvdata(pdev, dev->persist);
        priv->pci_dev_data = id->driver_data;
+       mutex_init(&dev->persist->device_state_mutex);
+       mutex_init(&dev->persist->interface_state_mutex);
 
        ret =  __mlx4_init_one(pdev, id->driver_data, priv);
-       if (ret)
+       if (ret) {
+               kfree(dev->persist);
                kfree(priv);
+       } else {
+               pci_save_state(pdev);
+       }
 
        return ret;
 }
 
+static void mlx4_clean_dev(struct mlx4_dev *dev)
+{
+       struct mlx4_dev_persistent *persist = dev->persist;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       unsigned long   flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
+
+       memset(priv, 0, sizeof(*priv));
+       priv->dev.persist = persist;
+       priv->dev.flags = flags;
+}
+
 static void mlx4_unload_one(struct pci_dev *pdev)
 {
-       struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+       struct mlx4_dev  *dev  = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int               pci_dev_data;
-       int p;
-       int active_vfs = 0;
+       int p, i;
 
        if (priv->removed)
                return;
 
+       /* saving current ports type for further use */
+       for (i = 0; i < dev->caps.num_ports; i++) {
+               dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
+               dev->persist->curr_port_poss_type[i] = dev->caps.
+                                                      possible_type[i + 1];
+       }
+
        pci_dev_data = priv->pci_dev_data;
 
-       /* Disabling SR-IOV is not allowed while there are active vf's */
-       if (mlx4_is_master(dev)) {
-               active_vfs = mlx4_how_many_lives_vf(dev);
-               if (active_vfs) {
-                       pr_warn("Removing PF when there are active VF's !!\n");
-                       pr_warn("Will not disable SR-IOV.\n");
-               }
-       }
        mlx4_stop_sense(dev);
        mlx4_unregister_device(dev);
 
@@ -3156,12 +3374,6 @@ static void mlx4_unload_one(struct pci_dev *pdev)
 
        if (dev->flags & MLX4_FLAG_MSI_X)
                pci_disable_msix(pdev);
-       if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
-               mlx4_warn(dev, "Disabling SR-IOV\n");
-               pci_disable_sriov(pdev);
-               dev->flags &= ~MLX4_FLAG_SRIOV;
-               dev->num_vfs = 0;
-       }
 
        if (!mlx4_is_slave(dev))
                mlx4_free_ownership(dev);
@@ -3173,42 +3385,96 @@ static void mlx4_unload_one(struct pci_dev *pdev)
        kfree(dev->caps.qp1_proxy);
        kfree(dev->dev_vfs);
 
-       memset(priv, 0, sizeof(*priv));
+       mlx4_clean_dev(dev);
        priv->pci_dev_data = pci_dev_data;
        priv->removed = 1;
 }
 
 static void mlx4_remove_one(struct pci_dev *pdev)
 {
-       struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+       struct mlx4_dev  *dev  = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
+       int active_vfs = 0;
+
+       mutex_lock(&persist->interface_state_mutex);
+       persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
+       mutex_unlock(&persist->interface_state_mutex);
+
+       /* Disabling SR-IOV is not allowed while there are active vf's */
+       if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
+               active_vfs = mlx4_how_many_lives_vf(dev);
+               if (active_vfs) {
+                       pr_warn("Removing PF when there are active VF's !!\n");
+                       pr_warn("Will not disable SR-IOV.\n");
+               }
+       }
+
+       /* device marked to be under deletion running now without the lock
+        * letting other tasks to be terminated
+        */
+       if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+               mlx4_unload_one(pdev);
+       else
+               mlx4_info(dev, "%s: interface is down\n", __func__);
+       mlx4_catas_end(dev);
+       if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
+               mlx4_warn(dev, "Disabling SR-IOV\n");
+               pci_disable_sriov(pdev);
+       }
 
-       mlx4_unload_one(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
+       kfree(dev->persist);
        kfree(priv);
        pci_set_drvdata(pdev, NULL);
 }
 
+static int restore_current_port_types(struct mlx4_dev *dev,
+                                     enum mlx4_port_type *types,
+                                     enum mlx4_port_type *poss_types)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err, i;
+
+       mlx4_stop_sense(dev);
+
+       mutex_lock(&priv->port_mutex);
+       for (i = 0; i < dev->caps.num_ports; i++)
+               dev->caps.possible_type[i + 1] = poss_types[i];
+       err = mlx4_change_port_types(dev, types);
+       mlx4_start_sense(dev);
+       mutex_unlock(&priv->port_mutex);
+
+       return err;
+}
+
 int mlx4_restart_one(struct pci_dev *pdev)
 {
-       struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+       struct mlx4_dev  *dev  = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
        int pci_dev_data, err, total_vfs;
 
        pci_dev_data = priv->pci_dev_data;
-       total_vfs = dev->num_vfs;
-       memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs));
+       total_vfs = dev->persist->num_vfs;
+       memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
        mlx4_unload_one(pdev);
-       err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
+       err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
        if (err) {
                mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
                         __func__, pci_name(pdev), err);
                return err;
        }
 
+       err = restore_current_port_types(dev, dev->persist->curr_port_type,
+                                        dev->persist->curr_port_poss_type);
+       if (err)
+               mlx4_err(dev, "could not restore original port types (%d)\n",
+                        err);
+
        return err;
 }
 
@@ -3263,23 +3529,79 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
 {
-       mlx4_unload_one(pdev);
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
 
-       return state == pci_channel_io_perm_failure ?
-               PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+       mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
+       mlx4_enter_error_state(persist);
+
+       mutex_lock(&persist->interface_state_mutex);
+       if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+               mlx4_unload_one(pdev);
+
+       mutex_unlock(&persist->interface_state_mutex);
+       if (state == pci_channel_io_perm_failure)
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       pci_disable_device(pdev);
+       return PCI_ERS_RESULT_NEED_RESET;
 }
 
 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
-       struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+       struct mlx4_dev  *dev  = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int               ret;
+       int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+       int total_vfs;
 
-       ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv);
+       mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       pci_set_master(pdev);
+       pci_restore_state(pdev);
+       pci_save_state(pdev);
+
+       total_vfs = dev->persist->num_vfs;
+       memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+
+       mutex_lock(&persist->interface_state_mutex);
+       if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
+               ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+                                   priv, 1);
+               if (ret) {
+                       mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
+                                __func__,  ret);
+                       goto end;
+               }
+
+               ret = restore_current_port_types(dev, dev->persist->
+                                                curr_port_type, dev->persist->
+                                                curr_port_poss_type);
+               if (ret)
+                       mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+       }
+end:
+       mutex_unlock(&persist->interface_state_mutex);
 
        return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
 
+static void mlx4_shutdown(struct pci_dev *pdev)
+{
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+
+       mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+       mutex_lock(&persist->interface_state_mutex);
+       if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+               mlx4_unload_one(pdev);
+       mutex_unlock(&persist->interface_state_mutex);
+}
+
 static const struct pci_error_handlers mlx4_err_handler = {
        .error_detected = mlx4_pci_err_detected,
        .slot_reset     = mlx4_pci_slot_reset,
@@ -3289,7 +3611,7 @@ static struct pci_driver mlx4_driver = {
        .name           = DRV_NAME,
        .id_table       = mlx4_pci_table,
        .probe          = mlx4_init_one,
-       .shutdown       = mlx4_unload_one,
+       .shutdown       = mlx4_shutdown,
        .remove         = mlx4_remove_one,
        .err_handler    = &mlx4_err_handler,
 };
@@ -3341,7 +3663,6 @@ static int __init mlx4_init(void)
        if (mlx4_verify_params())
                return -EINVAL;
 
-       mlx4_catas_init();
 
        mlx4_wq = create_singlethread_workqueue("mlx4");
        if (!mlx4_wq)
index a3867e7ef8859811c7d8ee132e966cfe26709a6e..bd9ea0d01aae4cba296d2eba5e9f7a865b5a0426 100644 (file)
@@ -1318,6 +1318,9 @@ out:
        mutex_unlock(&priv->mcg_table.mutex);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
+       if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+               /* In case device is under an error, return success as a closing command */
+               err = 0;
        return err;
 }
 
@@ -1347,6 +1350,9 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
                       MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
+       if (err && !attach &&
+           dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
+               err = 0;
        return err;
 }
 
index bdd4eea2247cc1b09c19551a21d1c08f2c65e9d5..1409d0cd6143e8554524c8377a018ccb1c3edba1 100644 (file)
@@ -85,7 +85,9 @@ enum {
        MLX4_CLR_INT_SIZE       = 0x00008,
        MLX4_SLAVE_COMM_BASE    = 0x0,
        MLX4_COMM_PAGESIZE      = 0x1000,
-       MLX4_CLOCK_SIZE         = 0x00008
+       MLX4_CLOCK_SIZE         = 0x00008,
+       MLX4_COMM_CHAN_CAPS     = 0x8,
+       MLX4_COMM_CHAN_FLAGS    = 0xc
 };
 
 enum {
@@ -120,6 +122,10 @@ enum mlx4_mpt_state {
 };
 
 #define MLX4_COMM_TIME         10000
+#define MLX4_COMM_OFFLINE_TIME_OUT 30000
+#define MLX4_COMM_CMD_NA_OP    0x0
+
+
 enum {
        MLX4_COMM_CMD_RESET,
        MLX4_COMM_CMD_VHCR0,
@@ -190,6 +196,7 @@ struct mlx4_vhcr {
 struct mlx4_vhcr_cmd {
        __be64 in_param;
        __be32 in_modifier;
+       u32 reserved1;
        __be64 out_param;
        __be16 token;
        u16 reserved;
@@ -221,21 +228,24 @@ extern int mlx4_debug_level;
 #define mlx4_dbg(mdev, format, ...)                                    \
 do {                                                                   \
        if (mlx4_debug_level)                                           \
-               dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format,      \
+               dev_printk(KERN_DEBUG,                                  \
+                          &(mdev)->persist->pdev->dev, format,         \
                           ##__VA_ARGS__);                              \
 } while (0)
 
 #define mlx4_err(mdev, format, ...)                                    \
-       dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+       dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
 #define mlx4_info(mdev, format, ...)                                   \
-       dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+       dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
 #define mlx4_warn(mdev, format, ...)                                   \
-       dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+       dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
 
 extern int mlx4_log_num_mgm_entry_size;
 extern int log_mtts_per_seg;
+extern int mlx4_internal_err_reset;
 
-#define MLX4_MAX_NUM_SLAVES    (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define MLX4_MAX_NUM_SLAVES    (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
+                                    MLX4_MFUNC_MAX))
 #define ALL_SLAVES 0xff
 
 struct mlx4_bitmap {
@@ -606,7 +616,6 @@ struct mlx4_mgm {
 struct mlx4_cmd {
        struct pci_pool        *pool;
        void __iomem           *hcr;
-       struct mutex            hcr_mutex;
        struct mutex            slave_cmd_mutex;
        struct semaphore        poll_sem;
        struct semaphore        event_sem;
@@ -877,6 +886,8 @@ struct mlx4_priv {
        int                     reserved_mtts;
        int                     fs_hash_mode;
        u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
+       struct mlx4_port_map    v2p; /* cached port mapping configuration */
+       struct mutex            bond_mutex; /* for bond mode */
        __be64                  slave_node_guids[MLX4_MFUNC_MAX];
 
        atomic_t                opreq_count;
@@ -994,7 +1005,8 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
-void mlx4_catas_init(void);
+int mlx4_catas_init(struct mlx4_dev *dev);
+void mlx4_catas_end(struct mlx4_dev *dev);
 int mlx4_restart_one(struct pci_dev *pdev);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
@@ -1160,13 +1172,14 @@ enum {
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
 int mlx4_multi_func_init(struct mlx4_dev *dev);
+int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev);
 void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
 int mlx4_cmd_use_events(struct mlx4_dev *dev);
 void mlx4_cmd_use_polling(struct mlx4_dev *dev);
 
 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
-                 unsigned long timeout);
+                 u16 op, unsigned long timeout);
 
 void mlx4_cq_tasklet_cb(unsigned long data);
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
@@ -1176,7 +1189,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
-void mlx4_handle_catas_err(struct mlx4_dev *dev);
+void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
                    enum mlx4_port_type *type);
@@ -1354,6 +1367,7 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
 /* Returns the VF index of slave */
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
 int mlx4_config_mad_demux(struct mlx4_dev *dev);
+int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
 
 enum mlx4_zone_flags {
        MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO   = 1UL << 0,
index 944a112dff374ef919a216e5ee13415b9f3974bd..2a8268e6be15d0b8682b8ad47bb4bb4ac071b243 100644 (file)
@@ -390,6 +390,7 @@ struct mlx4_en_dev {
        struct pci_dev          *pdev;
        struct mutex            state_lock;
        struct net_device       *pndev[MLX4_MAX_PORTS + 1];
+       struct net_device       *upper[MLX4_MAX_PORTS + 1];
        u32                     port_cnt;
        bool                    device_up;
        struct mlx4_en_profile  profile;
@@ -410,6 +411,7 @@ struct mlx4_en_dev {
        unsigned long           overflow_period;
        struct ptp_clock        *ptp_clock;
        struct ptp_clock_info   ptp_clock_info;
+       struct notifier_block   nb;
 };
 
 
@@ -845,6 +847,9 @@ int mlx4_en_reset_config(struct net_device *dev,
                         struct hwtstamp_config ts_config,
                         netdev_features_t new_features);
 
+int mlx4_en_netdev_event(struct notifier_block *this,
+                        unsigned long event, void *ptr);
+
 /*
  * Functions for time stamping
  */
index d6f549685c0fcd8a5cccf0c948536ed924024892..78f51e103880d4dcae7745ec5cb5b2425e370e73 100644 (file)
@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
 void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
        mlx4_mtt_cleanup(dev, &mr->mtt);
+       mr->mtt.order = -1;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
 
@@ -593,18 +594,15 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
        int err;
 
-       mpt_entry->start       = cpu_to_be64(iova);
-       mpt_entry->length      = cpu_to_be64(size);
-       mpt_entry->entity_size = cpu_to_be32(page_shift);
-
        err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
        if (err)
                return err;
 
-       mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
-                                          MLX4_MPT_PD_FLAG_EN_INV);
-       mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
-                                          MLX4_MPT_FLAG_SW_OWNS);
+       mpt_entry->start       = cpu_to_be64(iova);
+       mpt_entry->length      = cpu_to_be64(size);
+       mpt_entry->entity_size = cpu_to_be32(page_shift);
+       mpt_entry->flags    &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
+                                          MLX4_MPT_FLAG_SW_OWNS));
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
@@ -707,13 +705,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        if (!mtts)
                return -ENOMEM;
 
-       dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+       dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
                                npages * sizeof (u64), DMA_TO_DEVICE);
 
        for (i = 0; i < npages; ++i)
                mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-       dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+       dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
                                   npages * sizeof (u64), DMA_TO_DEVICE);
 
        return 0;
@@ -1019,13 +1017,13 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
        /* Make sure MPT status is visible before writing MTT entries */
        wmb();
 
-       dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+       dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
                                npages * sizeof(u64), DMA_TO_DEVICE);
 
        for (i = 0; i < npages; ++i)
                fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-       dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+       dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
                                   npages * sizeof(u64), DMA_TO_DEVICE);
 
        fmr->mpt->key    = cpu_to_be32(key);
@@ -1154,7 +1152,7 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
 
 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
-                       MLX4_CMD_NATIVE);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
index 74216071201f3877fadf3aaaff3b6f0910425e32..609c59dc854e987a073e32cb4ff2c08736998edb 100644 (file)
@@ -151,11 +151,13 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
                return -ENOMEM;
 
        if (mlx4_is_slave(dev))
-               offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+               offset = uar->index % ((int)pci_resource_len(dev->persist->pdev,
+                                                            2) /
                                       dev->caps.uar_page_size);
        else
                offset = uar->index;
-       uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
+       uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT)
+                   + offset;
        uar->map = NULL;
        return 0;
 }
@@ -212,7 +214,6 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
                list_add(&uar->bf_list, &priv->bf_list);
        }
 
-       bf->uar = uar;
        idx = ffz(uar->free_bf_bmap);
        uar->free_bf_bmap |= 1 << idx;
        bf->uar = uar;
index 30eb1ead0fe6ee8942aeb1aefb57971d35590bcf..9f268f05290aa7b6491a8eb1a3a8adc636e2ac56 100644 (file)
@@ -553,9 +553,9 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
                slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
                                    dev, &exclusive_ports);
                slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
-                                          dev->num_vfs + 1);
+                                          dev->persist->num_vfs + 1);
        }
-       vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+       vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
        if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
                return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
        return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
@@ -590,10 +590,10 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
                slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
                                    dev, &exclusive_ports);
                slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
-                                          dev->num_vfs + 1);
+                                          dev->persist->num_vfs + 1);
        }
        gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
-       vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+       vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
        if (slave_gid <= gids % vfs)
                return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
 
@@ -644,7 +644,7 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
        int num_eth_ports, err;
        int i;
 
-       if (slave < 0 || slave > dev->num_vfs)
+       if (slave < 0 || slave > dev->persist->num_vfs)
                return;
 
        actv_ports = mlx4_get_active_ports(dev, slave);
@@ -1214,7 +1214,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
                return -EINVAL;
 
        slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
-       num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+       num_vfs = bitmap_weight(slaves_pport.slaves,
+                               dev->persist->num_vfs + 1) - 1;
 
        for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
                if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
@@ -1258,7 +1259,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
                                                        dev, &exclusive_ports);
                                num_vfs_before += bitmap_weight(
                                                slaves_pport_actv.slaves,
-                                               dev->num_vfs + 1);
+                                               dev->persist->num_vfs + 1);
                        }
 
                        /* candidate_slave_gid isn't necessarily the correct slave, but
@@ -1288,7 +1289,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
                                                dev, &exclusive_ports);
                                slave_gid += bitmap_weight(
                                                slaves_pport_actv.slaves,
-                                               dev->num_vfs + 1);
+                                               dev->persist->num_vfs + 1);
                        }
                }
                *slave_id = slave_gid;
index 1586ecce13c719b1eaddfb43f0d96d80e27b6389..2bb8553bd9054b25456ec694ee25696e93ebde25 100644 (file)
@@ -882,6 +882,8 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
                context->flags &= cpu_to_be32(~(0xf << 28));
                context->flags |= cpu_to_be32(states[i + 1] << 28);
+               if (states[i + 1] != MLX4_QP_STATE_RTR)
+                       context->params2 &= ~MLX4_QP_BIT_FPP;
                err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
                                     context, 0, 0, qp);
                if (err) {
index ea1c6d092145a5d7e150e8549577edc87f3b0472..0076d88587ca06f36b10272a7bd760f1f70649d8 100644 (file)
@@ -76,19 +76,21 @@ int mlx4_reset(struct mlx4_dev *dev)
                goto out;
        }
 
-       pcie_cap = pci_pcie_cap(dev->pdev);
+       pcie_cap = pci_pcie_cap(dev->persist->pdev);
 
        for (i = 0; i < 64; ++i) {
                if (i == 22 || i == 23)
                        continue;
-               if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
+               if (pci_read_config_dword(dev->persist->pdev, i * 4,
+                                         hca_header + i)) {
                        err = -ENODEV;
                        mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
                        goto out;
                }
        }
 
-       reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
+       reset = ioremap(pci_resource_start(dev->persist->pdev, 0) +
+                       MLX4_RESET_BASE,
                        MLX4_RESET_SIZE);
        if (!reset) {
                err = -ENOMEM;
@@ -122,8 +124,8 @@ int mlx4_reset(struct mlx4_dev *dev)
 
        end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
        do {
-               if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
-                   vendor != 0xffff)
+               if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID,
+                                         &vendor) && vendor != 0xffff)
                        break;
 
                msleep(1);
@@ -138,14 +140,16 @@ int mlx4_reset(struct mlx4_dev *dev)
        /* Now restore the PCI headers */
        if (pcie_cap) {
                devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
-               if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
+               if (pcie_capability_write_word(dev->persist->pdev,
+                                              PCI_EXP_DEVCTL,
                                               devctl)) {
                        err = -ENODEV;
                        mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
                        goto out;
                }
                linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
-               if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
+               if (pcie_capability_write_word(dev->persist->pdev,
+                                              PCI_EXP_LNKCTL,
                                               linkctl)) {
                        err = -ENODEV;
                        mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
@@ -157,7 +161,8 @@ int mlx4_reset(struct mlx4_dev *dev)
                if (i * 4 == PCI_COMMAND)
                        continue;
 
-               if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
+               if (pci_write_config_dword(dev->persist->pdev, i * 4,
+                                          hca_header[i])) {
                        err = -ENODEV;
                        mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
                                 i);
@@ -165,7 +170,7 @@ int mlx4_reset(struct mlx4_dev *dev)
                }
        }
 
-       if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
+       if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND,
                                   hca_header[PCI_COMMAND / 4])) {
                err = -ENODEV;
                mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
index 4efbd1eca6116001e21d655c54479c71cb6f6584..486e3d26cd4a9ef4bb6a23995b85ac50cd413776 100644 (file)
@@ -309,12 +309,13 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;
 
-       if (slave > dev->num_vfs)
+       if (slave > dev->persist->num_vfs)
                return -EINVAL;
 
        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
-               res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+               res_alloc->allocated[(port - 1) *
+               (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
@@ -352,7 +353,8 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
        if (!err) {
                /* grant the request */
                if (port > 0) {
-                       res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
+                       res_alloc->allocated[(port - 1) *
+                       (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
@@ -376,13 +378,14 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;
 
-       if (slave > dev->num_vfs)
+       if (slave > dev->persist->num_vfs)
                return;
 
        spin_lock(&res_alloc->alloc_lock);
 
        allocated = (port > 0) ?
-               res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+               res_alloc->allocated[(port - 1) *
+               (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];
 
@@ -397,7 +400,8 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
        }
 
        if (port > 0) {
-               res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
+               res_alloc->allocated[(port - 1) *
+               (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
@@ -415,7 +419,8 @@ static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
 {
-       res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
+       res_alloc->guaranteed[vf] = num_instances /
+                                   (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
@@ -486,21 +491,26 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
-               res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
-               res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+               res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
+                                          sizeof(int), GFP_KERNEL);
+               res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
+                                               sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
-                                                      (dev->num_vfs + 1) * sizeof(int),
-                                                       GFP_KERNEL);
+                                                      (dev->persist->num_vfs
+                                                      + 1) *
+                                                      sizeof(int), GFP_KERNEL);
                else
-                       res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+                       res_alloc->allocated = kzalloc((dev->persist->
+                                                       num_vfs + 1) *
+                                                      sizeof(int), GFP_KERNEL);
 
                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;
 
                spin_lock_init(&res_alloc->alloc_lock);
-               for (t = 0; t < dev->num_vfs + 1; t++) {
+               for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
@@ -2531,7 +2541,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
        /* Make sure that the PD bits related to the slave id are zeros. */
        pd = mr_get_pd(inbox->buf);
        pd_slave = (pd >> 17) & 0x7f;
-       if (pd_slave != 0 && pd_slave != slave) {
+       if (pd_slave != 0 && --pd_slave != slave) {
                err = -EPERM;
                goto ex_abort;
        }
@@ -2934,6 +2944,9 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
 
+       if (slave != mlx4_master_func_num(dev))
+               qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
+
        switch (qp_type) {
        case MLX4_QP_ST_RC:
        case MLX4_QP_ST_XRC:
@@ -4667,7 +4680,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
        int state;
        LIST_HEAD(tlist);
        int eqn;
-       struct mlx4_cmd_mailbox *mailbox;
 
        err = move_all_busy(dev, slave, RES_EQ);
        if (err)
@@ -4693,20 +4705,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
                                        break;
 
                                case RES_EQ_HW:
-                                       mailbox = mlx4_alloc_cmd_mailbox(dev);
-                                       if (IS_ERR(mailbox)) {
-                                               cond_resched();
-                                               continue;
-                                       }
-                                       err = mlx4_cmd_box(dev, slave, 0,
-                                                          eqn & 0xff, 0,
-                                                          MLX4_CMD_HW2SW_EQ,
-                                                          MLX4_CMD_TIME_CLASS_A,
-                                                          MLX4_CMD_NATIVE);
+                                       err = mlx4_cmd(dev, slave, eqn & 0xff,
+                                                      1, MLX4_CMD_HW2SW_EQ,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
                                                         slave, eqn);
-                                       mlx4_free_cmd_mailbox(dev, mailbox);
                                        atomic_dec(&eq->mtt->ref_count);
                                        state = RES_EQ_RESERVED;
                                        break;
index 10e1f1a18255986f4e9c5a42e1fb909a0a869028..4878025e231c6b4ebb9f63c72ec3fcdf1402150b 100644 (file)
@@ -300,11 +300,11 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                param = qp->pid;
                break;
        case QP_STATE:
-               param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+               param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
                *is_str = 1;
                break;
        case QP_XPORT:
-               param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+               param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
                *is_str = 1;
                break;
        case QP_MTU:
@@ -464,7 +464,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
 
 
        if (is_str)
-               ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field);
+               ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
        else
                ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
 
index 3f4525619a07efb6de4711189565a1f06e624293..d6651937d8996188b249fe911b09e9cbbdc480a5 100644 (file)
@@ -903,12 +903,12 @@ static void remove_one(struct pci_dev *pdev)
 }
 
 static const struct pci_device_id mlx5_core_pci_table[] = {
-       { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
-       { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
-       { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
-       { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
-       { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
-       { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
+       { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
+       { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
+       { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */
+       { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */
+       { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
+       { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */
        { 0, }
 };
 
index af099057f0e9c263dc250924785a15cf75ea6edb..1412f5af05ecf521e41ff109dc1a24e7621090ce 100644 (file)
@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
                                      &mgp->cmd_bus, GFP_KERNEL);
-       if (mgp->cmd == NULL)
+       if (!mgp->cmd) {
+               status = -ENOMEM;
                goto abort_with_enabled;
+       }
 
        mgp->board_span = pci_resource_len(pdev, 0);
        mgp->iomem_base = pci_resource_start(pdev, 0);
@@ -4224,8 +4226,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
                mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
 #endif
        myri10ge_free_slices(mgp);
-       if (mgp->msix_vectors != NULL)
-               kfree(mgp->msix_vectors);
+       kfree(mgp->msix_vectors);
        dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
                          mgp->cmd, mgp->cmd_bus);
 
index 2552e550a78cb56926fd2eeb0f163683308c9378..eb807b0dc72a3745efc88e6f5ffdba2c126244c8 100644 (file)
@@ -1122,12 +1122,12 @@ again:
        }
 
 #ifdef NS83820_VLAN_ACCEL_SUPPORT
-       if(vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                /* fetch the vlan tag info out of the
                 * ancillary data if the vlan code
                 * is using hw vlan acceleration
                 */
-               short tag = vlan_tx_tag_get(skb);
+               short tag = skb_vlan_tag_get(skb);
                extsts |= (EXTSTS_VPKT | htons(tag));
        }
 #endif
index f5e4b820128ba8966e8a7d4ecdc9713e7982aa61..a4cdf2f8041a735de3a199d5e0ba6c5040a02b4e 100644 (file)
@@ -4045,8 +4045,8 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        queue = 0;
-       if (vlan_tx_tag_present(skb))
-               vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb))
+               vlan_tag = skb_vlan_tag_get(skb);
        if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *ip;
@@ -6987,7 +6987,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
                        if (sp->s2io_entries[i].in_use == MSIX_FLG) {
                                if (sp->s2io_entries[i].type ==
                                    MSIX_RING_TYPE) {
-                                       sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+                                       snprintf(sp->desc[i],
+                                               sizeof(sp->desc[i]),
+                                               "%s:MSI-X-%d-RX",
                                                dev->name, i);
                                        err = request_irq(sp->entries[i].vector,
                                                          s2io_msix_ring_handle,
@@ -6996,7 +6998,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
                                                          sp->s2io_entries[i].arg);
                                } else if (sp->s2io_entries[i].type ==
                                           MSIX_ALARM_TYPE) {
-                                       sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+                                       snprintf(sp->desc[i],
+                                               sizeof(sp->desc[i]),
+                                               "%s:MSI-X-%d-TX",
                                                dev->name, i);
                                        err = request_irq(sp->entries[i].vector,
                                                          s2io_msix_fifo_handle,
@@ -8154,7 +8158,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
                          "%s: UDP Fragmentation Offload(UFO) enabled\n",
                          dev->name);
        /* Initialize device name */
-       sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
+       snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
+                sp->product_name);
 
        if (vlan_tag_strip)
                sp->vlan_strip_flag = 1;
index 2bbd01fcb9b019eaeabc1519a98799e65ea1b51a..6223930a8155e66878e1bde5645c5d31809e2a8b 100644 (file)
@@ -4637,7 +4637,7 @@ static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
        vpath->ringh = NULL;
        vpath->fifoh = NULL;
        memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
-       vpath->stats_block = 0;
+       vpath->stats_block = NULL;
        vpath->hw_stats = NULL;
        vpath->hw_stats_sav = NULL;
        vpath->sw_stats = NULL;
index cc0485e3c6210997c532fd4cee51946e93983104..50d5604833edc8e0ffde1f7f8974002852f8bb97 100644 (file)
@@ -890,8 +890,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                dev->name, __func__, __LINE__,
                fifo_hw, dtr, dtr_priv);
 
-       if (vlan_tx_tag_present(skb)) {
-               u16 vlan_tag = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               u16 vlan_tag = skb_vlan_tag_get(skb);
                vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
        }
 
index f39cae620f61568688c79415c6dbf9ebbf96ff90..a41bb5e6b954f0e6b40375b76a52ed8a254cdb18 100644 (file)
@@ -2462,9 +2462,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                         NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
 
        /* vlan tag */
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
-                                       vlan_tx_tag_get(skb));
+                                       skb_vlan_tag_get(skb));
        else
                start_tx->txvlan = 0;
 
index 613037584d08e785ef2700ca1d2221b50b256e9c..e0c31e3947d1091371bfa742fbea5cee9743002d 100644 (file)
@@ -176,9 +176,7 @@ netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
 static void
 netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
 {
-       if (recv_ctx->sds_rings != NULL)
-               kfree(recv_ctx->sds_rings);
-
+       kfree(recv_ctx->sds_rings);
        recv_ctx->sds_rings = NULL;
 }
 
@@ -1893,9 +1891,9 @@ netxen_tso_check(struct net_device *netdev,
                protocol = vh->h_vlan_encapsulated_proto;
                flags = FLAGS_VLAN_TAGGED;
 
-       } else if (vlan_tx_tag_present(skb)) {
+       } else if (skb_vlan_tag_present(skb)) {
                flags = FLAGS_VLAN_OOB;
-               vid = vlan_tx_tag_get(skb);
+               vid = skb_vlan_tag_get(skb);
                netxen_set_tx_vlan_tci(first_desc, vid);
                vlan_oob = 1;
        }
@@ -2388,7 +2386,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 
        work_done = netxen_process_rcv_ring(sds_ring, budget);
 
-       if ((work_done < budget) && tx_complete) {
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                if (test_bit(__NX_DEV_UP, &adapter->state))
                        netxen_nic_enable_int(sds_ring);
index c2f09af5c25b9f389ce2eb0bbe85487d733d3e1e..4847713211cafa2258b9511cda89f4232a623ebe 100644 (file)
@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 {
        int i = 0;
 
-       while (i < 10) {
-               if (i)
-                       ssleep(1);
-
+       do {
                if (ql_sem_lock(qdev,
                                QL_DRVR_SEM_MASK,
                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
                                      "driver lock acquired\n");
                        return 1;
                }
-       }
+               ssleep(1);
+       } while (++i < 10);
 
        netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
        return 0;
index 18e5de72e9b4c2c9b95848bf444597251942039e..d4b5085a21fa889c73dcefda89642d2a1eaa02b7 100644 (file)
@@ -10,6 +10,7 @@
 #include <net/ip.h>
 #include <linux/ipv6.h>
 #include <net/checksum.h>
+#include <linux/printk.h>
 
 #include "qlcnic.h"
 
@@ -320,8 +321,8 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                if (protocol == ETH_P_8021Q) {
                        vh = (struct vlan_ethhdr *)skb->data;
                        vlan_id = ntohs(vh->h_vlan_TCI);
-               } else if (vlan_tx_tag_present(skb)) {
-                       vlan_id = vlan_tx_tag_get(skb);
+               } else if (skb_vlan_tag_present(skb)) {
+                       vlan_id = skb_vlan_tag_get(skb);
                }
        }
 
@@ -472,9 +473,9 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                flags = QLCNIC_FLAGS_VLAN_TAGGED;
                vlan_tci = ntohs(vh->h_vlan_TCI);
                protocol = ntohs(vh->h_vlan_encapsulated_proto);
-       } else if (vlan_tx_tag_present(skb)) {
+       } else if (skb_vlan_tag_present(skb)) {
                flags = QLCNIC_FLAGS_VLAN_OOB;
-               vlan_tci = vlan_tx_tag_get(skb);
+               vlan_tci = skb_vlan_tag_get(skb);
        }
        if (unlikely(adapter->tx_pvid)) {
                if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
@@ -967,7 +968,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
                                              budget);
        work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                        qlcnic_enable_sds_intr(adapter, sds_ring);
@@ -992,6 +998,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
                napi_complete(&tx_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_enable_tx_intr(adapter, tx_ring);
+       } else {
+               /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/
+               work_done = budget;
        }
 
        return work_done;
@@ -1465,14 +1474,14 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
 
 static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
 {
-       int i;
-       unsigned char *data = skb->data;
+       if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
+               char prefix[30];
+
+               scnprintf(prefix, sizeof(prefix), "%s: %s: ",
+                         dev_name(&adapter->pdev->dev), __func__);
 
-       pr_info(KERN_INFO "\n");
-       for (i = 0; i < skb->len; i++) {
-               QLCDB(adapter, DRV, "%02x ", data[i]);
-               if ((i & 0x0f) == 8)
-                       pr_info(KERN_INFO "\n");
+               print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
+                                    skb->data, skb->len, true);
        }
 }
 
@@ -1950,7 +1959,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
 
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
@@ -1973,7 +1987,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
 
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
@@ -1995,6 +2014,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
                napi_complete(&tx_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
                        qlcnic_enable_tx_intr(adapter, tx_ring);
+       } else {
+               /* need a repoll */
+               work_done = budget;
        }
 
        return work_done;
index 9929b97cfb3629d1afb04809f8faefd7302347b8..a430a34a4434aa78a87cb43d90c805fa83ac8252 100644 (file)
@@ -294,9 +294,7 @@ int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
 
 void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
 {
-       if (recv_ctx->sds_rings != NULL)
-               kfree(recv_ctx->sds_rings);
-
+       kfree(recv_ctx->sds_rings);
        recv_ctx->sds_rings = NULL;
 }
 
@@ -1257,8 +1255,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
        if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
                if (fw_dump->tmpl_hdr == NULL ||
                                adapter->fw_version > prev_fw_version) {
-                       if (fw_dump->tmpl_hdr)
-                               vfree(fw_dump->tmpl_hdr);
+                       vfree(fw_dump->tmpl_hdr);
                        if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
                                dev_info(&pdev->dev,
                                        "Supports FW dump capability\n");
@@ -2374,13 +2371,12 @@ void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
 
        for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
                tx_ring = &adapter->tx_ring[ring];
-               if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
+               if (tx_ring) {
                        vfree(tx_ring->cmd_buf_arr);
                        tx_ring->cmd_buf_arr = NULL;
                }
        }
-       if (adapter->tx_ring != NULL)
-               kfree(adapter->tx_ring);
+       kfree(adapter->tx_ring);
 }
 
 int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
@@ -2605,6 +2601,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        } else {
                dev_err(&pdev->dev,
                        "%s: failed. Please Reboot\n", __func__);
+               err = -ENODEV;
                goto err_out_free_hw;
        }
 
@@ -2757,13 +2754,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
        }
 
        qlcnic_dcb_free(adapter->dcb);
-
        qlcnic_detach(adapter);
-
-       if (adapter->npars != NULL)
-               kfree(adapter->npars);
-       if (adapter->eswitch != NULL)
-               kfree(adapter->eswitch);
+       kfree(adapter->npars);
+       kfree(adapter->eswitch);
 
        if (qlcnic_82xx_check(adapter))
                qlcnic_clr_all_drv_state(adapter, 0);
@@ -2931,13 +2924,13 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
 
 static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
 {
-       if (adapter->fhash.fmax && adapter->fhash.fhead)
+       if (adapter->fhash.fmax)
                kfree(adapter->fhash.fhead);
 
        adapter->fhash.fhead = NULL;
        adapter->fhash.fmax = 0;
 
-       if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead)
+       if (adapter->rx_fhash.fmax)
                kfree(adapter->rx_fhash.fhead);
 
        adapter->rx_fhash.fmax = 0;
index c9f57fb84b9eb47215f0cc21a680dce46d253e56..332bb8a3f43060bea2ca65991e485e7d8aec1564 100644 (file)
@@ -1407,8 +1407,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
        current_version = qlcnic_83xx_get_fw_version(adapter);
 
        if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
-               if (fw_dump->tmpl_hdr)
-                       vfree(fw_dump->tmpl_hdr);
+               vfree(fw_dump->tmpl_hdr);
                if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
                        dev_info(&pdev->dev, "Supports FW dump capability\n");
        }
index 6c904a6cad2a177036b42190cffb17a25e194708..8011ef3e7707f783f4caf9f1c16f3d7be3410c4f 100644 (file)
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status = 0;
+       bool need_restart = netif_running(ndev);
 
-       status = ql_adapter_down(qdev);
-       if (status) {
-               netif_err(qdev, link, qdev->ndev,
-                         "Failed to bring down the adapter\n");
-               return status;
+       if (need_restart) {
+               status = ql_adapter_down(qdev);
+               if (status) {
+                       netif_err(qdev, link, qdev->ndev,
+                                 "Failed to bring down the adapter\n");
+                       return status;
+               }
        }
 
        /* update the features with resent change */
        ndev->features = features;
 
-       status = ql_adapter_up(qdev);
-       if (status) {
-               netif_err(qdev, link, qdev->ndev,
-                         "Failed to bring up the adapter\n");
-               return status;
+       if (need_restart) {
+               status = ql_adapter_up(qdev);
+               if (status) {
+                       netif_err(qdev, link, qdev->ndev,
+                                 "Failed to bring up the adapter\n");
+                       return status;
+               }
        }
+
        return status;
 }
 
@@ -2660,11 +2666,11 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 
        mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
-                            "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
+                            "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
                mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
-               mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
+               mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
        }
        tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
        if (tso < 0) {
index 9c31e46d1eee481d112b6cd9a5fb0ec761322b07..d79e33b3c1913ae41caa046860fc74fb2d4c8698 100644 (file)
@@ -708,8 +708,8 @@ static void cp_tx (struct cp_private *cp)
 
 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
 {
-       return vlan_tx_tag_present(skb) ?
-               TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+       return skb_vlan_tag_present(skb) ?
+               TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
 }
 
 static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
index 14a1c5cec3a59fc8699858a507ca3b077b45981b..cd286b0356ab497fb437c93aa46cc0233c68c976 100644 (file)
@@ -2073,8 +2073,8 @@ static int rtl8169_set_features(struct net_device *dev,
 
 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
 {
-       return (vlan_tx_tag_present(skb)) ?
-               TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+       return (skb_vlan_tag_present(skb)) ?
+               TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
 }
 
 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
@@ -7049,6 +7049,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        u32 status, len;
        u32 opts[2];
        int frags;
+       bool stop_queue;
 
        if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
                netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7105,11 +7106,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        tp->cur_tx += frags + 1;
 
-       RTL_W8(TxPoll, NPQ);
+       stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
 
-       mmiowb();
+       if (!skb->xmit_more || stop_queue ||
+           netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
+               RTL_W8(TxPoll, NPQ);
+
+               mmiowb();
+       }
 
-       if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+       if (stop_queue) {
                /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
                 * not miss a ring update when it notices a stopped queue.
                 */
index c29ba80ae02bfde60f41f4118c02bab642303b89..4da8bd263997a17baf89b5fe7a3d2198f186827b 100644 (file)
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_ADRL31]    = 0x01fc,
 };
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
        return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -473,6 +476,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
+       .fdr_value      = 0x00000f0f,
 
        .apr            = 1,
        .mpr            = 1,
@@ -495,6 +499,9 @@ static struct sh_eth_cpu_data r8a779x_data = {
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
+       .fdr_value      = 0x00000f0f,
+
+       .trscer_err_mask = DESC_I_RINT8,
 
        .apr            = 1,
        .mpr            = 1,
@@ -590,7 +597,7 @@ static struct sh_eth_cpu_data sh7757_data = {
 static void sh_eth_chip_reset_giga(struct net_device *ndev)
 {
        int i;
-       unsigned long mahr[2], malr[2];
+       u32 mahr[2], malr[2];
 
        /* save MAHR and MALR */
        for (i = 0; i < 2; i++) {
@@ -856,6 +863,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 
        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+       if (!cd->trscer_err_mask)
+               cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
 }
 
 static int sh_eth_check_reset(struct net_device *ndev)
@@ -981,7 +991,7 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
        }
 }
 
-static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
+static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
 {
        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
                return EDTRR_TRNS_GETHER;
@@ -1113,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+       dma_addr_t dma_addr;
 
        mdp->cur_rx = 0;
        mdp->cur_tx = 0;
@@ -1126,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = netdev_alloc_skb(ndev, skbuff_size);
-               mdp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                sh_eth_set_receive_align(skb);
@@ -1135,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
                rxdesc = &mdp->rx_ring[i];
                /* The size of the buffer is a multiple of 16 bytes. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-               dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-                              DMA_FROM_DEVICE);
-               rxdesc->addr = virt_to_phys(skb->data);
+               dma_addr = dma_map_single(&ndev->dev, skb->data,
+                                         rxdesc->buffer_length,
+                                         DMA_FROM_DEVICE);
+               if (dma_mapping_error(&ndev->dev, dma_addr)) {
+                       kfree_skb(skb);
+                       break;
+               }
+               mdp->rx_skbuff[i] = skb;
+               rxdesc->addr = dma_addr;
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
                /* Rx descriptor address set */
@@ -1294,7 +1310,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        /* Frame recv control (enable multiple-packets per rx irq) */
        sh_eth_write(ndev, RMCR_RNC, RMCR);
 
-       sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
+       sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
 
        if (mdp->cd->bculr)
                sh_eth_write(ndev, 0x800, BCULR);       /* Burst sycle set */
@@ -1309,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
                     RFLR);
 
        sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-       if (start)
+       if (start) {
+               mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+       }
 
        /* PAUSE Prohibition */
        val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1349,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        return ret;
 }
 
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       int i;
+
+       /* Deactivate all TX descriptors, so DMA should stop at next
+        * packet boundary if it's currently running
+        */
+       for (i = 0; i < mdp->num_tx_ring; i++)
+               mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+       /* Disable TX FIFO egress to MAC */
+       sh_eth_rcv_snd_disable(ndev);
+
+       /* Stop RX DMA at next packet boundary */
+       sh_eth_write(ndev, 0, EDRRR);
+
+       /* Aside from TX DMA, we can't tell when the hardware is
+        * really stopped, so we need to reset to make sure.
+        * Before doing that, wait for long enough to *probably*
+        * finish transmitting the last packet and poll stats.
+        */
+       msleep(2); /* max frame time at 10 Mbps < 1250 us */
+       sh_eth_get_stats(ndev);
+       sh_eth_reset(ndev);
+}
+
 /* free Tx skb function */
 static int sh_eth_txfree(struct net_device *ndev)
 {
@@ -1393,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        u16 pkt_len = 0;
        u32 desc_status;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+       dma_addr_t dma_addr;
 
        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
@@ -1440,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
-                       dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-                                               ALIGN(mdp->rx_buf_sz, 16),
-                                               DMA_FROM_DEVICE);
+                       dma_unmap_single(&ndev->dev, rxdesc->addr,
+                                        ALIGN(mdp->rx_buf_sz, 16),
+                                        DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        netif_receive_skb(skb);
@@ -1462,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
                if (mdp->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb(ndev, skbuff_size);
-                       mdp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        sh_eth_set_receive_align(skb);
-                       dma_map_single(&ndev->dev, skb->data,
-                                      rxdesc->buffer_length, DMA_FROM_DEVICE);
+                       dma_addr = dma_map_single(&ndev->dev, skb->data,
+                                                 rxdesc->buffer_length,
+                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&ndev->dev, dma_addr)) {
+                               kfree_skb(skb);
+                               break;
+                       }
+                       mdp->rx_skbuff[entry] = skb;
 
                        skb_checksum_none_assert(skb);
-                       rxdesc->addr = virt_to_phys(skb->data);
+                       rxdesc->addr = dma_addr;
                }
                if (entry >= mdp->num_rx_ring - 1)
                        rxdesc->status |=
@@ -1514,7 +1565,7 @@ static void sh_eth_rcv_snd_enable(struct net_device *ndev)
 }
 
 /* error control function */
-static void sh_eth_error(struct net_device *ndev, int intr_status)
+static void sh_eth_error(struct net_device *ndev, u32 intr_status)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 felic_stat;
@@ -1566,7 +1617,6 @@ ignore_link:
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        ndev->stats.rx_frame_errors++;
-                       netif_err(mdp, rx_err, ndev, "Receive Abort\n");
                }
        }
 
@@ -1585,13 +1635,11 @@ ignore_link:
        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                ndev->stats.rx_over_errors++;
-               netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
        }
 
        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                ndev->stats.rx_fifo_errors++;
-               netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
        }
 
        if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1630,7 +1678,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_cpu_data *cd = mdp->cd;
        irqreturn_t ret = IRQ_NONE;
-       unsigned long intr_status, intr_enable;
+       u32 intr_status, intr_enable;
 
        spin_lock(&mdp->lock);
 
@@ -1646,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
                ret = IRQ_HANDLED;
        else
-               goto other_irq;
+               goto out;
+
+       if (!likely(mdp->irq_enabled)) {
+               sh_eth_write(ndev, 0, EESIPR);
+               goto out;
+       }
 
        if (intr_status & EESR_RX_CHECK) {
                if (napi_schedule_prep(&mdp->napi)) {
@@ -1656,7 +1709,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                        __napi_schedule(&mdp->napi);
                } else {
                        netdev_warn(ndev,
-                                   "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
+                                   "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
                                    intr_status, intr_enable);
                }
        }
@@ -1677,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                sh_eth_error(ndev, intr_status);
        }
 
-other_irq:
+out:
        spin_unlock(&mdp->lock);
 
        return ret;
@@ -1689,7 +1742,7 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
                                                  napi);
        struct net_device *ndev = napi->dev;
        int quota = budget;
-       unsigned long intr_status;
+       u32 intr_status;
 
        for (;;) {
                intr_status = sh_eth_read(ndev, EESR);
@@ -1705,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
        napi_complete(napi);
 
        /* Reenable Rx interrupts */
-       sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+       if (mdp->irq_enabled)
+               sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 out:
        return budget - quota;
 }
@@ -1820,6 +1874,9 @@ static int sh_eth_get_settings(struct net_device *ndev,
        unsigned long flags;
        int ret;
 
+       if (!mdp->phydev)
+               return -ENODEV;
+
        spin_lock_irqsave(&mdp->lock, flags);
        ret = phy_ethtool_gset(mdp->phydev, ecmd);
        spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1834,6 +1891,9 @@ static int sh_eth_set_settings(struct net_device *ndev,
        unsigned long flags;
        int ret;
 
+       if (!mdp->phydev)
+               return -ENODEV;
+
        spin_lock_irqsave(&mdp->lock, flags);
 
        /* disable tx and rx */
@@ -1868,6 +1928,9 @@ static int sh_eth_nway_reset(struct net_device *ndev)
        unsigned long flags;
        int ret;
 
+       if (!mdp->phydev)
+               return -ENODEV;
+
        spin_lock_irqsave(&mdp->lock, flags);
        ret = phy_start_aneg(mdp->phydev);
        spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1952,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
                return -EINVAL;
 
        if (netif_running(ndev)) {
+               netif_device_detach(ndev);
                netif_tx_disable(ndev);
-               /* Disable interrupts by clearing the interrupt mask. */
-               sh_eth_write(ndev, 0x0000, EESIPR);
-               /* Stop the chip's Tx and Rx processes. */
-               sh_eth_write(ndev, 0, EDTRR);
-               sh_eth_write(ndev, 0, EDRRR);
+
+               /* Serialise with the interrupt handler and NAPI, then
+                * disable interrupts.  We have to clear the
+                * irq_enabled flag first to ensure that interrupts
+                * won't be re-enabled.
+                */
+               mdp->irq_enabled = false;
                synchronize_irq(ndev->irq);
-       }
+               napi_synchronize(&mdp->napi);
+               sh_eth_write(ndev, 0x0000, EESIPR);
 
-       /* Free all the skbuffs in the Rx queue. */
-       sh_eth_ring_free(ndev);
-       /* Free DMA buffer */
-       sh_eth_free_dma_buffer(mdp);
+               sh_eth_dev_exit(ndev);
+
+               /* Free all the skbuffs in the Rx queue. */
+               sh_eth_ring_free(ndev);
+               /* Free DMA buffer */
+               sh_eth_free_dma_buffer(mdp);
+       }
 
        /* Set new parameters */
        mdp->num_rx_ring = ring->rx_pending;
        mdp->num_tx_ring = ring->tx_pending;
 
-       ret = sh_eth_ring_init(ndev);
-       if (ret < 0) {
-               netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
-               return ret;
-       }
-       ret = sh_eth_dev_init(ndev, false);
-       if (ret < 0) {
-               netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
-               return ret;
-       }
-
        if (netif_running(ndev)) {
+               ret = sh_eth_ring_init(ndev);
+               if (ret < 0) {
+                       netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+                                  __func__);
+                       return ret;
+               }
+               ret = sh_eth_dev_init(ndev, false);
+               if (ret < 0) {
+                       netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+                                  __func__);
+                       return ret;
+               }
+
+               mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
                /* Setting the Rx mode will start the Rx process. */
                sh_eth_write(ndev, EDRRR_R, EDRRR);
-               netif_wake_queue(ndev);
+               netif_device_attach(ndev);
        }
 
        return 0;
@@ -2060,7 +2133,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 
        netif_err(mdp, timer, ndev,
                  "transmit timed out, status %8.8x, resetting...\n",
-                 (int)sh_eth_read(ndev, EESR));
+                 sh_eth_read(ndev, EESR));
 
        /* tx_errors count up */
        ndev->stats.tx_errors++;
@@ -2101,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        spin_unlock_irqrestore(&mdp->lock, flags);
 
+       if (skb_padto(skb, ETH_ZLEN))
+               return NETDEV_TX_OK;
+
        entry = mdp->cur_tx % mdp->num_tx_ring;
        mdp->tx_skbuff[entry] = skb;
        txdesc = &mdp->tx_ring[entry];
@@ -2110,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                 skb->len + 2);
        txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
                                      DMA_TO_DEVICE);
-       if (skb->len < ETH_ZLEN)
-               txdesc->buffer_length = ETH_ZLEN;
-       else
-               txdesc->buffer_length = skb->len;
+       if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+       txdesc->buffer_length = skb->len;
 
        if (entry >= mdp->num_tx_ring - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2165,24 +2242,26 @@ static int sh_eth_close(struct net_device *ndev)
 
        netif_stop_queue(ndev);
 
-       /* Disable interrupts by clearing the interrupt mask. */
+       /* Serialise with the interrupt handler and NAPI, then disable
+        * interrupts.  We have to clear the irq_enabled flag first to
+        * ensure that interrupts won't be re-enabled.
+        */
+       mdp->irq_enabled = false;
+       synchronize_irq(ndev->irq);
+       napi_disable(&mdp->napi);
        sh_eth_write(ndev, 0x0000, EESIPR);
 
-       /* Stop the chip's Tx and Rx processes. */
-       sh_eth_write(ndev, 0, EDTRR);
-       sh_eth_write(ndev, 0, EDRRR);
+       sh_eth_dev_exit(ndev);
 
-       sh_eth_get_stats(ndev);
        /* PHY Disconnect */
        if (mdp->phydev) {
                phy_stop(mdp->phydev);
                phy_disconnect(mdp->phydev);
+               mdp->phydev = NULL;
        }
 
        free_irq(ndev->irq, ndev);
 
-       napi_disable(&mdp->napi);
-
        /* Free all the skbuffs in the Rx queue. */
        sh_eth_ring_free(ndev);
 
@@ -2410,7 +2489,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i, ret;
 
-       if (unlikely(!mdp->cd->tsu))
+       if (!mdp->cd->tsu)
                return 0;
 
        for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
@@ -2433,7 +2512,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
        void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
        int i;
 
-       if (unlikely(!mdp->cd->tsu))
+       if (!mdp->cd->tsu)
                return;
 
        for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
@@ -2443,8 +2522,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
        }
 }
 
-/* Multicast reception directions set */
-static void sh_eth_set_multicast_list(struct net_device *ndev)
+/* Update promiscuous flag and multicast filter */
+static void sh_eth_set_rx_mode(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ecmr_bits;
@@ -2455,7 +2534,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
        /* Initial condition is MCT = 1, PRM = 0.
         * Depending on ndev->flags, set PRM or clear MCT
         */
-       ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
+       ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
+       if (mdp->cd->tsu)
+               ecmr_bits |= ECMR_MCT;
 
        if (!(ndev->flags & IFF_MULTICAST)) {
                sh_eth_tsu_purge_mcast(ndev);
@@ -2484,9 +2565,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
                                }
                        }
                }
-       } else {
-               /* Normal, unicast/broadcast-only mode. */
-               ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
        }
 
        /* update the ethernet mode */
@@ -2694,6 +2772,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
        .ndo_stop               = sh_eth_close,
        .ndo_start_xmit         = sh_eth_start_xmit,
        .ndo_get_stats          = sh_eth_get_stats,
+       .ndo_set_rx_mode        = sh_eth_set_rx_mode,
        .ndo_tx_timeout         = sh_eth_tx_timeout,
        .ndo_do_ioctl           = sh_eth_do_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
@@ -2706,7 +2785,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
        .ndo_stop               = sh_eth_close,
        .ndo_start_xmit         = sh_eth_start_xmit,
        .ndo_get_stats          = sh_eth_get_stats,
-       .ndo_set_rx_mode        = sh_eth_set_multicast_list,
+       .ndo_set_rx_mode        = sh_eth_set_rx_mode,
        .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
        .ndo_tx_timeout         = sh_eth_tx_timeout,
@@ -2940,6 +3019,36 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int sh_eth_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       int ret = 0;
+
+       if (netif_running(ndev)) {
+               netif_device_detach(ndev);
+               ret = sh_eth_close(ndev);
+       }
+
+       return ret;
+}
+
+static int sh_eth_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       int ret = 0;
+
+       if (netif_running(ndev)) {
+               ret = sh_eth_open(ndev);
+               if (ret < 0)
+                       return ret;
+               netif_device_attach(ndev);
+       }
+
+       return ret;
+}
+#endif
+
 static int sh_eth_runtime_nop(struct device *dev)
 {
        /* Runtime PM callback shared between ->runtime_suspend()
@@ -2953,8 +3062,8 @@ static int sh_eth_runtime_nop(struct device *dev)
 }
 
 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
-       .runtime_suspend = sh_eth_runtime_nop,
-       .runtime_resume = sh_eth_runtime_nop,
+       SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
+       SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
 };
 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
 #else
index 22301bf9c21daeb925d75aa7ce5c7a588d977e11..259d03f353e109709abfbaac4a447d9f4af82026 100644 (file)
@@ -369,6 +369,8 @@ enum DESC_I_BIT {
        DESC_I_RINT1 = 0x0001,
 };
 
+#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+
 /* RPADIR */
 enum RPADIR_BIT {
        RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@@ -457,18 +459,21 @@ struct sh_eth_cpu_data {
 
        /* mandatory initialize value */
        int register_type;
-       unsigned long eesipr_value;
+       u32 eesipr_value;
 
        /* optional initialize value */
-       unsigned long ecsr_value;
-       unsigned long ecsipr_value;
-       unsigned long fdr_value;
-       unsigned long fcftr_value;
-       unsigned long rpadir_value;
+       u32 ecsr_value;
+       u32 ecsipr_value;
+       u32 fdr_value;
+       u32 fcftr_value;
+       u32 rpadir_value;
 
        /* interrupt checking mask */
-       unsigned long tx_check;
-       unsigned long eesr_err_check;
+       u32 tx_check;
+       u32 eesr_err_check;
+
+       /* Error mask */
+       u32 trscer_err_mask;
 
        /* hardware features */
        unsigned long irq_flags; /* IRQ configuration flags */
@@ -508,6 +513,7 @@ struct sh_eth_private {
        u32 rx_buf_sz;                  /* Based on MTU+slack. */
        int edmac_endian;
        struct napi_struct napi;
+       bool irq_enabled;
        /* MII transceiver section. */
        u32 phy_id;                     /* PHY ID */
        struct mii_bus *mii_bus;        /* MDIO bus control */
@@ -537,7 +543,7 @@ static inline void sh_eth_soft_swap(char *src, int len)
 #endif
 }
 
-static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
+static inline void sh_eth_write(struct net_device *ndev, u32 data,
                                int enum_index)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -545,8 +551,7 @@ static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
        iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
 }
 
-static inline unsigned long sh_eth_read(struct net_device *ndev,
-                                       int enum_index)
+static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
@@ -559,14 +564,13 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
        return mdp->tsu_addr + mdp->reg_offset[enum_index];
 }
 
-static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
-                               unsigned long data, int enum_index)
+static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+                                   int enum_index)
 {
        iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
 }
 
-static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp,
-                                       int enum_index)
+static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
 {
        return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
 }
index 2f398fa4b9e607546e8f1255ec7a53bfa018114c..34389b6aa67cbd26263366ca1bff769f1b27c68a 100644 (file)
@@ -806,13 +806,13 @@ static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
 
 static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
 {
-       return (void *) desc_info->desc->cookie;
+       return (void *)(uintptr_t)desc_info->desc->cookie;
 }
 
 static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
                                       void *ptr)
 {
-       desc_info->desc->cookie = (long) ptr;
+       desc_info->desc->cookie = (uintptr_t) ptr;
 }
 
 static struct rocker_desc_info *
@@ -3026,11 +3026,17 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
                container_of(work, struct rocker_fdb_learn_work, work);
        bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
        bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
+       struct netdev_switch_notifier_fdb_info info;
+
+       info.addr = lw->addr;
+       info.vid = lw->vid;
 
        if (learned && removing)
-               br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid);
+               call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
+                                            lw->dev, &info.info);
        else if (learned && !removing)
-               br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid);
+               call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
+                                            lw->dev, &info.info);
 
        kfree(work);
 }
@@ -3565,6 +3571,8 @@ nest_cancel:
        rocker_tlv_nest_cancel(desc_info, frags);
 out:
        dev_kfree_skb(skb);
+       dev->stats.tx_dropped++;
+
        return NETDEV_TX_OK;
 }
 
@@ -3668,7 +3676,8 @@ static int rocker_fdb_fill_info(struct sk_buff *skb,
        if (vid && nla_put_u16(skb, NDA_VLAN, vid))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -3713,7 +3722,7 @@ skip:
 }
 
 static int rocker_port_bridge_setlink(struct net_device *dev,
-                                     struct nlmsghdr *nlh)
+                                     struct nlmsghdr *nlh, u16 flags)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
        struct nlattr *protinfo;
@@ -3824,11 +3833,145 @@ static void rocker_port_get_drvinfo(struct net_device *dev,
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
 }
 
+static struct rocker_port_stats {
+       char str[ETH_GSTRING_LEN];
+       int type;
+} rocker_port_stats[] = {
+       { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
+       { "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
+       { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
+       { "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },
+
+       { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
+       { "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
+       { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
+       { "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
+};
+
+#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
+
+static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
+                                   u8 *data)
+{
+       u8 *p = data;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
+                       memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+               break;
+       }
+}
+
+static int
+rocker_cmd_get_port_stats_prep(struct rocker *rocker,
+                              struct rocker_port *rocker_port,
+                              struct rocker_desc_info *desc_info,
+                              void *priv)
+{
+       struct rocker_tlv *cmd_stats;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+                              ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
+               return -EMSGSIZE;
+
+       cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_stats)
+               return -EMSGSIZE;
+
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT,
+                              rocker_port->lport))
+               return -EMSGSIZE;
+
+       rocker_tlv_nest_end(desc_info, cmd_stats);
+
+       return 0;
+}
+
+static int
+rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
+                                      struct rocker_port *rocker_port,
+                                      struct rocker_desc_info *desc_info,
+                                      void *priv)
+{
+       struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+       struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
+       struct rocker_tlv *pattr;
+       u32 lport;
+       u64 *data = priv;
+       int i;
+
+       rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+
+       if (!attrs[ROCKER_TLV_CMD_INFO])
+               return -EIO;
+
+       rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
+                               attrs[ROCKER_TLV_CMD_INFO]);
+
+       if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT])
+               return -EIO;
+
+       lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]);
+       if (lport != rocker_port->lport)
+               return -EIO;
+
+       for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
+               pattr = stats_attrs[rocker_port_stats[i].type];
+               if (!pattr)
+                       continue;
+
+               data[i] = rocker_tlv_get_u64(pattr);
+       }
+
+       return 0;
+}
+
+static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
+                                            void *priv)
+{
+       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+                              rocker_cmd_get_port_stats_prep, NULL,
+                              rocker_cmd_get_port_stats_ethtool_proc,
+                              priv, false);
+}
+
+static void rocker_port_get_stats(struct net_device *dev,
+                                 struct ethtool_stats *stats, u64 *data)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+
+       if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
+                       data[i] = 0;
+       }
+
+       return;
+}
+
+static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ROCKER_PORT_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static const struct ethtool_ops rocker_port_ethtool_ops = {
        .get_settings           = rocker_port_get_settings,
        .set_settings           = rocker_port_set_settings,
        .get_drvinfo            = rocker_port_get_drvinfo,
        .get_link               = ethtool_op_get_link,
+       .get_strings            = rocker_port_get_strings,
+       .get_ethtool_stats      = rocker_port_get_stats,
+       .get_sset_count         = rocker_port_get_sset_count,
 };
 
 /*****************
@@ -3850,12 +3993,22 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
 
        /* Cleanup tx descriptors */
        while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
+               struct sk_buff *skb;
+
                err = rocker_desc_err(desc_info);
                if (err && net_ratelimit())
                        netdev_err(rocker_port->dev, "tx desc received with err %d\n",
                                   err);
                rocker_tx_desc_frags_unmap(rocker_port, desc_info);
-               dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info));
+
+               skb = rocker_desc_cookie_ptr_get(desc_info);
+               if (err == 0) {
+                       rocker_port->dev->stats.tx_packets++;
+                       rocker_port->dev->stats.tx_bytes += skb->len;
+               } else
+                       rocker_port->dev->stats.tx_errors++;
+
+               dev_kfree_skb_any(skb);
                credits++;
        }
 
@@ -3888,6 +4041,10 @@ static int rocker_port_rx_proc(struct rocker *rocker,
        rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
        skb_put(skb, rx_len);
        skb->protocol = eth_type_trans(skb, rocker_port->dev);
+
+       rocker_port->dev->stats.rx_packets++;
+       rocker_port->dev->stats.rx_bytes += skb->len;
+
        netif_receive_skb(skb);
 
        return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
@@ -3921,6 +4078,9 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
                                netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
                                           err);
                }
+               if (err)
+                       rocker_port->dev->stats.rx_errors++;
+
                rocker_desc_gen_clear(desc_info);
                rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
                credits++;
@@ -4004,7 +4164,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
                       NAPI_POLL_WEIGHT);
        rocker_carrier_init(rocker_port);
 
-       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+                               NETIF_F_HW_SWITCH_OFFLOAD;
 
        err = register_netdev(dev);
        if (err) {
index 8d2865ba634c7e8babe6ca8e2f3ac05acaeaff41..a5bc432feada1986eb41a1f3fa935c9fa38d5e4d 100644 (file)
@@ -127,6 +127,9 @@ enum {
        ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL,
        ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS,
 
+       ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS,
+       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS,
+
        __ROCKER_TLV_CMD_TYPE_MAX,
        ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1,
 };
@@ -146,6 +149,24 @@ enum {
                        __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
 };
 
+enum {
+       ROCKER_TLV_CMD_PORT_STATS_UNSPEC,
+       ROCKER_TLV_CMD_PORT_STATS_LPORT,            /* u32 */
+
+       ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,          /* u64 */
+       ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,         /* u64 */
+       ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED,       /* u64 */
+       ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,        /* u64 */
+
+       ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,          /* u64 */
+       ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,         /* u64 */
+       ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED,       /* u64 */
+       ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,        /* u64 */
+
+       __ROCKER_TLV_CMD_PORT_STATS_MAX,
+       ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1,
+};
+
 enum rocker_port_mode {
        ROCKER_PORT_MODE_OF_DPA,
 };
index b6612d6090ac45de92b0524796abb5e11067a3a6..11288d4fc85f0d5440c4ca961fdcc0c5afe63d29 100644 (file)
@@ -473,13 +473,19 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
        /* allocate memory for RX skbuff array */
        rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
                                               sizeof(dma_addr_t), GFP_KERNEL);
-       if (rx_ring->rx_skbuff_dma == NULL)
-               goto dmamem_err;
+       if (!rx_ring->rx_skbuff_dma) {
+               dma_free_coherent(priv->device,
+                                 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+                                 rx_ring->dma_rx, rx_ring->dma_rx_phy);
+               goto error;
+       }
 
        rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
                                           sizeof(struct sk_buff *), GFP_KERNEL);
-       if (rx_ring->rx_skbuff == NULL)
-               goto rxbuff_err;
+       if (!rx_ring->rx_skbuff) {
+               kfree(rx_ring->rx_skbuff_dma);
+               goto error;
+       }
 
        /* initialise the buffers */
        for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
@@ -501,13 +507,6 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 err_init_rx_buffers:
        while (--desc_index >= 0)
                free_rx_ring(priv->device, rx_ring, desc_index);
-       kfree(rx_ring->rx_skbuff);
-rxbuff_err:
-       kfree(rx_ring->rx_skbuff_dma);
-dmamem_err:
-       dma_free_coherent(priv->device,
-                         rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
-                         rx_ring->dma_rx, rx_ring->dma_rx_phy);
 error:
        return -ENOMEM;
 }
@@ -1272,7 +1271,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
                ctxt_desc_req = 1;
 
-       if (unlikely(vlan_tx_tag_present(skb) ||
+       if (unlikely(skb_vlan_tag_present(skb) ||
                     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
                      tqueue->hwts_tx_en)))
                ctxt_desc_req = 1;
index 866560ea9e180d115513eead6d5b2a3bc1754f7b..b02eed12bfc5743117285216db1b4fe6ba19536e 100644 (file)
@@ -108,10 +108,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
                }
        }
 
-       /* Get MAC address if available (DT) */
-       if (mac)
-               ether_addr_copy(priv->dev->dev_addr, mac);
-
        priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
        if (!priv) {
                pr_err("%s: main driver probe failed\n", __func__);
@@ -125,6 +121,10 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
                goto err_drv_remove;
        }
 
+       /* Get MAC address if available (DT) */
+       if (mac)
+               ether_addr_copy(priv->dev->dev_addr, mac);
+
        /* Get the TX/RX IRQ numbers */
        for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
                priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
index 9468e64e6007bd2ee200e650c0355398ea750d5c..3e97a8b43147cc7c8371de91786997da7468df18 100644 (file)
@@ -5,8 +5,9 @@
 config NET_VENDOR_SMSC
        bool "SMC (SMSC)/Western Digital devices"
        default y
-       depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \
-               BLACKFIN || MN10300 || COLDFIRE || XTENSA || NIOS2 || PCI || PCMCIA
+       depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
+                  ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \
+                  PCMCIA || SUPERH || XTENSA
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
@@ -38,8 +39,9 @@ config SMC91X
        tristate "SMC 91C9x/91C1xxx support"
        select CRC32
        select MII
-       depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
-                   MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) && (!OF || GPIOLIB)
+       depends on !OF || GPIOLIB
+       depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
+                  M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA
        ---help---
          This is a driver for SMC's 91x series of Ethernet chipsets,
          including the SMC91C94 and the SMC91C111. Say Y if you want it
index 2a38dacbbd27fba4f153790117cb10102dadd6e6..be67baf5f6778d08df4eaa06216914b77ab8f2b5 100644 (file)
@@ -216,6 +216,27 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 
 #include <unit/smc91111.h>
 
+#elif defined(CONFIG_ATARI)
+
+#define SMC_CAN_USE_8BIT        1
+#define SMC_CAN_USE_16BIT       1
+#define SMC_CAN_USE_32BIT       1
+#define SMC_NOWAIT              1
+
+#define SMC_inb(a, r)           readb((a) + (r))
+#define SMC_inw(a, r)           readw((a) + (r))
+#define SMC_inl(a, r)           readl((a) + (r))
+#define SMC_outb(v, a, r)       writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)       writew(v, (a) + (r))
+#define SMC_outl(v, a, r)       writel(v, (a) + (r))
+#define SMC_insw(a, r, p, l)    readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l)   writesw((a) + (r), p, l)
+#define SMC_insl(a, r, p, l)    readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l)   writesl((a) + (r), p, l)
+
+#define RPC_LSA_DEFAULT         RPC_LED_100_10
+#define RPC_LSB_DEFAULT         RPC_LED_TX_RX
+
 #elif defined(CONFIG_ARCH_MSM)
 
 #define SMC_CAN_USE_8BIT       0
index 35f9b86bc9e545a38810e6858edb6ffcf6aa6a1b..6249a4ec08f05c3ddb7939ab4f5386fe75a8f551 100644 (file)
@@ -32,7 +32,7 @@
 struct rk_priv_data {
        struct platform_device *pdev;
        int phy_iface;
-       char regulator[32];
+       struct regulator *regulator;
 
        bool clk_enabled;
        bool clock_input;
@@ -287,47 +287,25 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
 
 static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
 {
-       struct regulator *ldo;
-       char *ldostr = bsp_priv->regulator;
+       struct regulator *ldo = bsp_priv->regulator;
        int ret;
        struct device *dev = &bsp_priv->pdev->dev;
 
-       if (!ldostr) {
-               dev_err(dev, "%s: no ldo found\n", __func__);
+       if (!ldo) {
+               dev_err(dev, "%s: no regulator found\n", __func__);
                return -1;
        }
 
-       ldo = regulator_get(NULL, ldostr);
-       if (!ldo) {
-               dev_err(dev, "\n%s get ldo %s failed\n", __func__, ldostr);
+       if (enable) {
+               ret = regulator_enable(ldo);
+               if (ret)
+                       dev_err(dev, "%s: fail to enable phy-supply\n",
+                               __func__);
        } else {
-               if (enable) {
-                       if (!regulator_is_enabled(ldo)) {
-                               regulator_set_voltage(ldo, 3300000, 3300000);
-                               ret = regulator_enable(ldo);
-                               if (ret != 0)
-                                       dev_err(dev, "%s: fail to enable %s\n",
-                                               __func__, ldostr);
-                               else
-                                       dev_info(dev, "turn on ldo done.\n");
-                       } else {
-                               dev_warn(dev, "%s is enabled before enable",
-                                        ldostr);
-                       }
-               } else {
-                       if (regulator_is_enabled(ldo)) {
-                               ret = regulator_disable(ldo);
-                               if (ret != 0)
-                                       dev_err(dev, "%s: fail to disable %s\n",
-                                               __func__, ldostr);
-                               else
-                                       dev_info(dev, "turn off ldo done.\n");
-                       } else {
-                               dev_warn(dev, "%s is disabled before disable",
-                                        ldostr);
-                       }
-               }
-               regulator_put(ldo);
+               ret = regulator_disable(ldo);
+               if (ret)
+                       dev_err(dev, "%s: fail to disable phy-supply\n",
+                               __func__);
        }
 
        return 0;
@@ -347,14 +325,14 @@ static void *rk_gmac_setup(struct platform_device *pdev)
 
        bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
 
-       ret = of_property_read_string(dev->of_node, "phy_regulator", &strings);
-       if (ret) {
-               dev_warn(dev, "%s: Can not read property: phy_regulator.\n",
-                        __func__);
-       } else {
-               dev_info(dev, "%s: PHY power controlled by regulator(%s).\n",
-                        __func__, strings);
-               strcpy(bsp_priv->regulator, strings);
+       bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
+       if (IS_ERR(bsp_priv->regulator)) {
+               if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
+                       dev_err(dev, "phy regulator is not available yet, deferred probing\n");
+                       return ERR_PTR(-EPROBE_DEFER);
+               }
+               dev_err(dev, "no regulator found\n");
+               bsp_priv->regulator = NULL;
        }
 
        ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
index 056b358b4a72441d424a9b3cf0094919e383c600..bb6e2dc61bec7dc8baac541e7bf390759caf017f 100644 (file)
@@ -122,7 +122,7 @@ struct sti_dwmac {
        bool ext_phyclk;        /* Clock from external PHY */
        u32 tx_retime_src;      /* TXCLK Retiming*/
        struct clk *clk;        /* PHY clock */
-       int ctrl_reg;           /* GMAC glue-logic control register */
+       u32 ctrl_reg;           /* GMAC glue-logic control register */
        int clk_sel_reg;        /* GMAC ext clk selection register */
        struct device *dev;
        struct regmap *regmap;
@@ -285,11 +285,6 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
        if (!np)
                return -EINVAL;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
-       if (!res)
-               return -ENODATA;
-       dwmac->ctrl_reg = res->start;
-
        /* clk selection from extra syscfg register */
        dwmac->clk_sel_reg = -ENXIO;
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
@@ -300,6 +295,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);
 
+       err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
+       if (err) {
+               dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
+               return err;
+       }
+
        dwmac->dev = dev;
        dwmac->interface = of_get_phy_mode(np);
        dwmac->regmap = regmap;
index 8c6b7c1651e5f82329882a179fcca12e0a622982..55e89b3838f1cb60df3f2f751ba254eddbef8fa2 100644 (file)
@@ -1097,6 +1097,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 
        priv->dirty_tx = 0;
        priv->cur_tx = 0;
+       netdev_reset_queue(priv->dev);
 
        stmmac_clear_descriptors(priv);
 
@@ -1287,7 +1288,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
                 *    that needs to not insert csum in the TDES.
                 */
                priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
-               tc = SF_DMA_MODE;
+               priv->xstats.threshold = SF_DMA_MODE;
        } else
                priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
 }
@@ -1300,6 +1301,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 static void stmmac_tx_clean(struct stmmac_priv *priv)
 {
        unsigned int txsize = priv->dma_tx_size;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        spin_lock(&priv->tx_lock);
 
@@ -1356,6 +1358,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
                priv->hw->mode->clean_desc3(priv, p);
 
                if (likely(skb != NULL)) {
+                       pkts_compl++;
+                       bytes_compl += skb->len;
                        dev_consume_skb_any(skb);
                        priv->tx_skbuff[entry] = NULL;
                }
@@ -1364,6 +1368,9 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 
                priv->dirty_tx++;
        }
+
+       netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+
        if (unlikely(netif_queue_stopped(priv->dev) &&
                     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
                netif_tx_lock(priv->dev);
@@ -1418,6 +1425,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
                                                     (i == txsize - 1));
        priv->dirty_tx = 0;
        priv->cur_tx = 0;
+       netdev_reset_queue(priv->dev);
        priv->hw->dma->start_tx(priv->ioaddr);
 
        priv->dev->stats.tx_errors++;
@@ -1444,9 +1452,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
        }
        if (unlikely(status & tx_hard_error_bump_tc)) {
                /* Try to bump up the dma threshold on this failure */
-               if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+               if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+                   (tc <= 256)) {
                        tc += 64;
-                       priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+                       if (priv->plat->force_thresh_dma_mode)
+                               priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+                       else
+                               priv->hw->dma->dma_mode(priv->ioaddr, tc,
+                                       SF_DMA_MODE);
                        priv->xstats.threshold = tc;
                }
        } else if (unlikely(status == tx_hard_error))
@@ -2050,6 +2063,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        if (!priv->hwts_tx_en)
                skb_tx_timestamp(skb);
 
+       netdev_sent_queue(dev, skb->len);
        priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
        spin_unlock(&priv->tx_lock);
@@ -2742,7 +2756,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
                priv->plat->enh_desc = priv->dma_cap.enh_desc;
                priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
 
-               priv->plat->tx_coe = priv->dma_cap.tx_coe;
+               /* TXCOE doesn't work in thresh DMA mode */
+               if (priv->plat->force_thresh_dma_mode)
+                       priv->plat->tx_coe = 0;
+               else
+                       priv->plat->tx_coe = priv->dma_cap.tx_coe;
 
                if (priv->dma_cap.rx_coe_type2)
                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
@@ -2778,6 +2796,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
  * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * on success the new private structure is returned, otherwise the error
+ * pointer.
  */
 struct stmmac_priv *stmmac_dvr_probe(struct device *device,
                                     struct plat_stmmacenet_data *plat_dat,
@@ -2789,7 +2810,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 
        ndev = alloc_etherdev(sizeof(struct stmmac_priv));
        if (!ndev)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        SET_NETDEV_DEV(ndev, device);
 
index 054520d67de4a93c4ca14098b8c27b5f6d44cde8..3bca908716e2a5c52a82ee6e6d8e67b9b10d23c6 100644 (file)
 *******************************************************************************/
 
 #include <linux/pci.h>
+#include <linux/dmi.h>
+
 #include "stmmac.h"
 
+/*
+ * This struct is used to associate PCI Function of MAC controller on a board,
+ * discovered via DMI, with the address of PHY connected to the MAC. The
+ * negative value of the address means that MAC controller is not connected
+ * with PHY.
+ */
+struct stmmac_pci_dmi_data {
+       const char *name;
+       unsigned int func;
+       int phy_addr;
+};
+
+struct stmmac_pci_info {
+       struct pci_dev *pdev;
+       int (*setup)(struct plat_stmmacenet_data *plat,
+                    struct stmmac_pci_info *info);
+       struct stmmac_pci_dmi_data *dmi;
+};
+
+static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
+{
+       const char *name = dmi_get_system_info(DMI_BOARD_NAME);
+       unsigned int func = PCI_FUNC(info->pdev->devfn);
+       struct stmmac_pci_dmi_data *dmi;
+
+       /*
+        * Galileo boards with old firmware don't support DMI. We always return
+        * 1 here, so at least first found MAC controller would be probed.
+        */
+       if (!name)
+               return 1;
+
+       for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
+               if (!strcmp(dmi->name, name) && dmi->func == func)
+                       return dmi->phy_addr;
+       }
+
+       return -ENODEV;
+}
+
 static void stmmac_default_data(struct plat_stmmacenet_data *plat)
 {
        plat->bus_id = 1;
@@ -48,6 +90,62 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
        plat->unicast_filter_entries = 1;
 }
 
+static int quark_default_data(struct plat_stmmacenet_data *plat,
+                             struct stmmac_pci_info *info)
+{
+       struct pci_dev *pdev = info->pdev;
+       int ret;
+
+       /*
+        * Refuse to load the driver and register net device if MAC controller
+        * does not connect to any PHY interface.
+        */
+       ret = stmmac_pci_find_phy_addr(info);
+       if (ret < 0)
+               return ret;
+
+       plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+       plat->phy_addr = ret;
+       plat->interface = PHY_INTERFACE_MODE_RMII;
+       plat->clk_csr = 2;
+       plat->has_gmac = 1;
+       plat->force_sf_dma_mode = 1;
+
+       plat->mdio_bus_data->phy_reset = NULL;
+       plat->mdio_bus_data->phy_mask = 0;
+
+       plat->dma_cfg->pbl = 16;
+       plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
+       plat->dma_cfg->fixed_burst = 1;
+
+       /* Set default value for multicast hash bins */
+       plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+       /* Set default value for unicast filter entries */
+       plat->unicast_filter_entries = 1;
+
+       return 0;
+}
+
+static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
+       {
+               .name = "Galileo",
+               .func = 6,
+               .phy_addr = 1,
+       },
+       {
+               .name = "GalileoGen2",
+               .func = 6,
+               .phy_addr = 1,
+       },
+       {}
+};
+
+static struct stmmac_pci_info quark_pci_info = {
+       .setup = quark_default_data,
+       .dmi = quark_pci_dmi_data,
+};
+
 /**
  * stmmac_pci_probe
  *
@@ -63,6 +161,7 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
 static int stmmac_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *id)
 {
+       struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
        struct plat_stmmacenet_data *plat;
        struct stmmac_priv *priv;
        int i;
@@ -103,7 +202,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       stmmac_default_data(plat);
+       if (info) {
+               info->pdev = pdev;
+               if (info->setup) {
+                       ret = info->setup(plat, info);
+                       if (ret)
+                               return ret;
+               }
+       } else
+               stmmac_default_data(plat);
+
+       pci_enable_msi(pdev);
 
        priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
        if (IS_ERR(priv)) {
@@ -155,11 +264,13 @@ static int stmmac_pci_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
 
 #define STMMAC_VENDOR_ID 0x700
+#define STMMAC_QUARK_ID  0x0937
 #define STMMAC_DEVICE_ID 0x1108
 
 static const struct pci_device_id stmmac_id_table[] = {
        {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
+       {PCI_VDEVICE(INTEL, STMMAC_QUARK_ID), (kernel_ulong_t)&quark_pci_info},
        {}
 };
 
index 879e29f48a8933002c291635dce0cf75785b99d1..fb846ebba1d9b0860acf920356aedfaf9d967f27 100644 (file)
@@ -235,6 +235,9 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
                        of_property_read_bool(np, "snps,fixed-burst");
                dma_cfg->mixed_burst =
                        of_property_read_bool(np, "snps,mixed-burst");
+               of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len);
+               if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256)
+                       dma_cfg->burst_len = 0;
        }
        plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
        if (plat->force_thresh_dma_mode) {
index 0c64162138377e31cd8a8a7b09adb42a1124cd39..4b51f903fb733cba9b9b8a3fe9539fe3bc811c84 100644 (file)
@@ -3341,8 +3341,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
 
        niu_hash_page(rp, page, addr);
        if (rp->rbr_blocks_per_page > 1)
-               atomic_add(rp->rbr_blocks_per_page - 1,
-                          &compound_head(page)->_count);
+               atomic_add(rp->rbr_blocks_per_page - 1, &page->_count);
 
        for (i = 0; i < rp->rbr_blocks_per_page; i++) {
                __le32 *rbr = &rp->rbr[start_index + i];
index d2835bf7b4fbef1744bf2bd6d840acfe47863a39..2b10b85d8a0881ba584cbdf3ea337edbea0c448f 100644 (file)
@@ -50,6 +50,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 #define        VNET_MAX_RETRIES        10
 
 static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
+static void vnet_port_reset(struct vnet_port *port);
 
 /* Ordered from largest major to lowest */
 static struct vio_version vnet_versions[] = {
@@ -351,10 +352,15 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
        unsigned int len = desc->size;
        unsigned int copy_len;
        struct sk_buff *skb;
+       int maxlen;
        int err;
 
        err = -EMSGSIZE;
-       if (unlikely(len < ETH_ZLEN || len > port->rmtu)) {
+       if (port->tso && port->tsolen > port->rmtu)
+               maxlen = port->tsolen;
+       else
+               maxlen = port->rmtu;
+       if (unlikely(len < ETH_ZLEN || len > maxlen)) {
                dev->stats.rx_length_errors++;
                goto out_dropped;
        }
@@ -731,9 +737,7 @@ ldc_ctrl:
                vio_link_state_change(vio, event);
 
                if (event == LDC_EVENT_RESET) {
-                       port->rmtu = 0;
-                       port->tso = true;
-                       port->tsolen = 0;
+                       vnet_port_reset(port);
                        vio_port_up(vio);
                }
                port->rx_event = 0;
@@ -929,36 +933,36 @@ static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
 
        *pending = 0;
 
-       txi = dr->prod-1;
-       if (txi < 0)
-               txi = VNET_TX_RING_SIZE-1;
-
+       txi = dr->prod;
        for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
                struct vio_net_desc *d;
 
-               d = vio_dring_entry(dr, txi);
-
-               if (d->hdr.state == VIO_DESC_DONE) {
-                       if (port->tx_bufs[txi].skb) {
-                               BUG_ON(port->tx_bufs[txi].skb->next);
+               --txi;
+               if (txi < 0)
+                       txi = VNET_TX_RING_SIZE-1;
 
-                               port->tx_bufs[txi].skb->next = skb;
-                               skb = port->tx_bufs[txi].skb;
-                               port->tx_bufs[txi].skb = NULL;
+               d = vio_dring_entry(dr, txi);
 
-                               ldc_unmap(port->vio.lp,
-                                         port->tx_bufs[txi].cookies,
-                                         port->tx_bufs[txi].ncookies);
-                       }
-                       d->hdr.state = VIO_DESC_FREE;
-               } else if (d->hdr.state == VIO_DESC_READY) {
+               if (d->hdr.state == VIO_DESC_READY) {
                        (*pending)++;
-               } else if (d->hdr.state == VIO_DESC_FREE) {
-                       break;
+                       continue;
                }
-               --txi;
-               if (txi < 0)
-                       txi = VNET_TX_RING_SIZE-1;
+               if (port->tx_bufs[txi].skb) {
+                       if (d->hdr.state != VIO_DESC_DONE)
+                               pr_notice("invalid ring buffer state %d\n",
+                                         d->hdr.state);
+                       BUG_ON(port->tx_bufs[txi].skb->next);
+
+                       port->tx_bufs[txi].skb->next = skb;
+                       skb = port->tx_bufs[txi].skb;
+                       port->tx_bufs[txi].skb = NULL;
+
+                       ldc_unmap(port->vio.lp,
+                                 port->tx_bufs[txi].cookies,
+                                 port->tx_bufs[txi].ncookies);
+               } else if (d->hdr.state == VIO_DESC_FREE)
+                       break;
+               d->hdr.state = VIO_DESC_FREE;
        }
        return skb;
 }
@@ -1119,6 +1123,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
                        skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
                        skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
                }
+               nskb->queue_mapping = skb->queue_mapping;
                dev_kfree_skb(skb);
                skb = nskb;
        }
@@ -1632,16 +1637,9 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
        int i;
 
        dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-       if (dr->base) {
-               ldc_free_exp_dring(port->vio.lp, dr->base,
-                                  (dr->entry_size * dr->num_entries),
-                                  dr->cookies, dr->ncookies);
-               dr->base = NULL;
-               dr->entry_size = 0;
-               dr->num_entries = 0;
-               dr->pending = 0;
-               dr->ncookies = 0;
-       }
+
+       if (dr->base == NULL)
+               return;
 
        for (i = 0; i < VNET_TX_RING_SIZE; i++) {
                struct vio_net_desc *d;
@@ -1651,8 +1649,6 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
                        continue;
 
                d = vio_dring_entry(dr, i);
-               if (d->hdr.state == VIO_DESC_READY)
-                       pr_warn("active transmit buffers freed\n");
 
                ldc_unmap(port->vio.lp,
                          port->tx_bufs[i].cookies,
@@ -1661,6 +1657,23 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
                port->tx_bufs[i].skb = NULL;
                d->hdr.state = VIO_DESC_FREE;
        }
+       ldc_free_exp_dring(port->vio.lp, dr->base,
+                          (dr->entry_size * dr->num_entries),
+                          dr->cookies, dr->ncookies);
+       dr->base = NULL;
+       dr->entry_size = 0;
+       dr->num_entries = 0;
+       dr->pending = 0;
+       dr->ncookies = 0;
+}
+
+static void vnet_port_reset(struct vnet_port *port)
+{
+       del_timer(&port->clean_timer);
+       vnet_port_free_tx_bufs(port);
+       port->rmtu = 0;
+       port->tso = true;
+       port->tsolen = 0;
 }
 
 static int vnet_port_alloc_tx_ring(struct vnet_port *port)
index 6ab36d9ff2abf0b846667e71dc238a63a670db23..a9cac8413e49e8062875d93af33b8ea4d41061b6 100644 (file)
@@ -1650,9 +1650,9 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
                    txd_mss);
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                /*Cut VLAN ID to 12 bits */
-               txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
+               txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
                txd_vtag = 1;
        }
 
index 605dd909bcc32fb4f95520b1a210a438a30649c2..3bc992cd70b7de4449afec00cdab816d9910ec22 100644 (file)
@@ -56,12 +56,18 @@ config TI_CPSW_PHY_SEL
          This driver supports configuring of the phy mode connected to
          the CPSW.
 
+config TI_CPSW_ALE
+       tristate "TI CPSW ALE Support"
+       ---help---
+         This driver supports TI's CPSW ALE module.
+
 config TI_CPSW
        tristate "TI CPSW Switch Support"
        depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
        select TI_DAVINCI_CPDMA
        select TI_DAVINCI_MDIO
        select TI_CPSW_PHY_SEL
+       select TI_CPSW_ALE
        select MFD_SYSCON
        select REGMAP
        ---help---
@@ -79,6 +85,25 @@ config TI_CPTS
          the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
          and Layer 2 packets, and the driver offers a PTP Hardware Clock.
 
+config TI_KEYSTONE_NETCP
+       tristate "TI Keystone NETCP Core Support"
+       select TI_CPSW_ALE
+       depends on OF
+       depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
+       ---help---
+         This driver supports TI's Keystone NETCP Core.
+
+         To compile this driver as a module, choose M here: the module
+         will be called keystone_netcp.
+
+config TI_KEYSTONE_NETCP_ETHSS
+       depends on TI_KEYSTONE_NETCP
+       tristate "TI Keystone NETCP Ethernet subsystem Support"
+       ---help---
+
+         To compile this driver as a module, choose M here: the module
+         will be called keystone_netcp_ethss.
+
 config TLAN
        tristate "TI ThunderLAN support"
        depends on (PCI || EISA)
index 9cfaab8152be08aa16bc37c91f622ba8e287989b..d420d9413e4a9ba6e27ef98cfac8c74fdb723ec1 100644 (file)
@@ -2,11 +2,20 @@
 # Makefile for the TI network device drivers.
 #
 
+obj-$(CONFIG_TI_CPSW) += cpsw-common.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
+
 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_CPMAC) += cpmac.o
 obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
 obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
 obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
+obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
+ti_cpsw-y := cpsw.o cpts.o
+
+obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
+keystone_netcp-y := netcp_core.o
+obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
+keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
new file mode 100644 (file)
index 0000000..f595094
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "cpsw.h"
+
+#define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
+#define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
+
+int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+                            u8 *mac_addr)
+{
+       u32 macid_lo;
+       u32 macid_hi;
+       struct regmap *syscon;
+
+       syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+       if (IS_ERR(syscon)) {
+               if (PTR_ERR(syscon) == -ENODEV)
+                       return 0;
+               return PTR_ERR(syscon);
+       }
+
+       regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(offset, slave),
+                   &macid_lo);
+       regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(offset, slave),
+                   &macid_hi);
+
+       mac_addr[5] = (macid_lo >> 8) & 0xff;
+       mac_addr[4] = macid_lo & 0xff;
+       mac_addr[3] = (macid_hi >> 24) & 0xff;
+       mac_addr[2] = (macid_hi >> 16) & 0xff;
+       mac_addr[1] = (macid_hi >> 8) & 0xff;
+       mac_addr[0] = macid_hi & 0xff;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_am33xx_cm_get_macid);
+
+MODULE_LICENSE("GPL");
index c560f9aeb55d691f23c65dae362c18defa1e9e44..7d8dd0d2182ef9f8d94d1e84b3c7a45f3364347c 100644 (file)
@@ -33,8 +33,6 @@
 #include <linux/of_net.h>
 #include <linux/of_device.h>
 #include <linux/if_vlan.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
 
 #include <linux/pinctrl/consumer.h>
 
@@ -610,7 +608,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 
                        /* Clear all mcast from ALE */
                        cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
-                                                priv->host_port);
+                                                priv->host_port, -1);
 
                        /* Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -634,6 +632,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
+       int vid;
+
+       if (priv->data.dual_emac)
+               vid = priv->slaves[priv->emac_port].port_vlan;
+       else
+               vid = priv->data.default_vlan;
 
        if (ndev->flags & IFF_PROMISC) {
                /* Enable promiscuous mode */
@@ -649,7 +653,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
        cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
 
        /* Clear all mcast from ALE */
-       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
+                                vid);
 
        if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;
@@ -754,10 +759,26 @@ requeue:
                dev_kfree_skb_any(new_skb);
 }
 
-static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
+static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
+{
+       struct cpsw_priv *priv = dev_id;
+
+       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+       cpdma_chan_process(priv->txch, 128);
+
+       priv = cpsw_get_slave_priv(priv, 1);
+       if (priv)
+               cpdma_chan_process(priv->txch, 128);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 {
        struct cpsw_priv *priv = dev_id;
 
+       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+
        cpsw_intr_disable(priv);
        if (priv->irq_enabled == true) {
                cpsw_disable_irq(priv);
@@ -786,8 +807,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
        int                     num_tx, num_rx;
 
        num_tx = cpdma_chan_process(priv->txch, 128);
-       if (num_tx)
-               cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
        num_rx = cpdma_chan_process(priv->rxch, budget);
        if (num_rx < budget) {
@@ -795,7 +814,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
 
                napi_complete(napi);
                cpsw_intr_enable(priv);
-               cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
                prim_cpsw = cpsw_get_slave_priv(priv, 0);
                if (prim_cpsw->irq_enabled == false) {
                        prim_cpsw->irq_enabled = true;
@@ -1310,8 +1328,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
        napi_enable(&priv->napi);
        cpdma_ctlr_start(priv->dma);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
        prim_cpsw = cpsw_get_slave_priv(priv, 0);
        if (prim_cpsw->irq_enabled == false) {
@@ -1578,9 +1594,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
        cpdma_chan_start(priv->txch);
        cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
 }
 
 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
@@ -1617,12 +1630,10 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
 
        cpsw_intr_disable(priv);
        cpdma_ctlr_int_ctrl(priv->dma, false);
-       cpsw_interrupt(ndev->irq, priv);
+       cpsw_rx_interrupt(priv->irqs_table[0], priv);
+       cpsw_tx_interrupt(priv->irqs_table[1], priv);
        cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
 }
 #endif
 
@@ -1630,16 +1641,24 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
                                unsigned short vid)
 {
        int ret;
-       int unreg_mcast_mask;
+       int unreg_mcast_mask = 0;
+       u32 port_mask;
 
-       if (priv->ndev->flags & IFF_ALLMULTI)
-               unreg_mcast_mask = ALE_ALL_PORTS;
-       else
-               unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+       if (priv->data.dual_emac) {
+               port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
+
+               if (priv->ndev->flags & IFF_ALLMULTI)
+                       unreg_mcast_mask = port_mask;
+       } else {
+               port_mask = ALE_ALL_PORTS;
 
-       ret = cpsw_ale_add_vlan(priv->ale, vid,
-                               ALE_ALL_PORTS << priv->host_port,
-                               0, ALE_ALL_PORTS << priv->host_port,
+               if (priv->ndev->flags & IFF_ALLMULTI)
+                       unreg_mcast_mask = ALE_ALL_PORTS;
+               else
+                       unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+       }
+
+       ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
                                unreg_mcast_mask << priv->host_port);
        if (ret != 0)
                return ret;
@@ -1650,8 +1669,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
                goto clean_vid;
 
        ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
-                                ALE_ALL_PORTS << priv->host_port,
-                                ALE_VLAN, vid, 0);
+                                port_mask, ALE_VLAN, vid, 0);
        if (ret != 0)
                goto clean_vlan_ucast;
        return 0;
@@ -1672,6 +1690,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               /* In dual EMAC, reserved VLAN id should not be used for
+                * creating VLAN interfaces as this can break the dual
+                * EMAC port separation
+                */
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
        return cpsw_add_vlan_ale_entry(priv, vid);
 }
@@ -1685,6 +1716,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
        ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
        if (ret != 0)
@@ -1894,36 +1934,6 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
        slave->port_vlan = data->dual_emac_res_vlan;
 }
 
-#define AM33XX_CTRL_MAC_LO_REG(id) (0x630 + 0x8 * id)
-#define AM33XX_CTRL_MAC_HI_REG(id) (0x630 + 0x8 * id + 0x4)
-
-static int cpsw_am33xx_cm_get_macid(struct device *dev, int slave,
-               u8 *mac_addr)
-{
-       u32 macid_lo;
-       u32 macid_hi;
-       struct regmap *syscon;
-
-       syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
-       if (IS_ERR(syscon)) {
-               if (PTR_ERR(syscon) == -ENODEV)
-                       return 0;
-               return PTR_ERR(syscon);
-       }
-
-       regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(slave), &macid_lo);
-       regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(slave), &macid_hi);
-
-       mac_addr[5] = (macid_lo >> 8) & 0xff;
-       mac_addr[4] = macid_lo & 0xff;
-       mac_addr[3] = (macid_hi >> 24) & 0xff;
-       mac_addr[2] = (macid_hi >> 16) & 0xff;
-       mac_addr[1] = (macid_hi >> 8) & 0xff;
-       mac_addr[0] = macid_hi & 0xff;
-
-       return 0;
-}
-
 static int cpsw_probe_dt(struct cpsw_platform_data *data,
                         struct platform_device *pdev)
 {
@@ -2048,7 +2058,8 @@ no_phy_slave:
                        memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
                } else {
                        if (of_machine_is_compatible("ti,am33xx")) {
-                               ret = cpsw_am33xx_cm_get_macid(&pdev->dev, i,
+                               ret = cpsw_am33xx_cm_get_macid(&pdev->dev,
+                                                       0x630, i,
                                                        slave_data->mac_addr);
                                if (ret)
                                        return ret;
@@ -2159,7 +2170,8 @@ static int cpsw_probe(struct platform_device *pdev)
        void __iomem                    *ss_regs;
        struct resource                 *res, *ss_res;
        u32 slave_offset, sliver_offset, slave_size;
-       int ret = 0, i, k = 0;
+       int ret = 0, i;
+       int irq;
 
        ndev = alloc_etherdev(sizeof(struct cpsw_priv));
        if (!ndev) {
@@ -2341,31 +2353,47 @@ static int cpsw_probe(struct platform_device *pdev)
                goto clean_dma_ret;
        }
 
-       ndev->irq = platform_get_irq(pdev, 0);
+       ndev->irq = platform_get_irq(pdev, 1);
        if (ndev->irq < 0) {
                dev_err(priv->dev, "error getting irq resource\n");
                ret = -ENOENT;
                goto clean_ale_ret;
        }
 
-       while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
-               if (k >= ARRAY_SIZE(priv->irqs_table)) {
-                       ret = -EINVAL;
-                       goto clean_ale_ret;
-               }
+       /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
+        * MISC IRQs which are always kept disabled with this driver so
+        * we will not request them.
+        *
+        * If anyone wants to implement support for those, make sure to
+        * first request and append them to irqs_table array.
+        */
 
-               ret = devm_request_irq(&pdev->dev, res->start, cpsw_interrupt,
-                                      0, dev_name(&pdev->dev), priv);
-               if (ret < 0) {
-                       dev_err(priv->dev, "error attaching irq (%d)\n", ret);
-                       goto clean_ale_ret;
-               }
+       /* RX IRQ */
+       irq = platform_get_irq(pdev, 1);
+       if (irq < 0)
+               goto clean_ale_ret;
 
-               priv->irqs_table[k] = res->start;
-               k++;
+       priv->irqs_table[0] = irq;
+       ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
+                              0, dev_name(&pdev->dev), priv);
+       if (ret < 0) {
+               dev_err(priv->dev, "error attaching irq (%d)\n", ret);
+               goto clean_ale_ret;
        }
 
-       priv->num_irqs = k;
+       /* TX IRQ */
+       irq = platform_get_irq(pdev, 2);
+       if (irq < 0)
+               goto clean_ale_ret;
+
+       priv->irqs_table[1] = irq;
+       ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
+                              0, dev_name(&pdev->dev), priv);
+       if (ret < 0) {
+               dev_err(priv->dev, "error attaching irq (%d)\n", ret);
+               goto clean_ale_ret;
+       }
+       priv->num_irqs = 2;
 
        ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
index 1b710674630c71d5732ccee80d095d7587769351..ca90efafd15691c03c36a0fa146d69bb51e75ff0 100644 (file)
@@ -41,5 +41,7 @@ struct cpsw_platform_data {
 };
 
 void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+                            u8 *mac_addr);
 
 #endif /* __CPSW_H__ */
index 097ebe7077ac0c8de51e3eb7e8da5809f5e6bcea..6e927b4583aa4b9067433a9a2919ab8ea6a43fa9 100644 (file)
@@ -13,6 +13,7 @@
  * GNU General Public License for more details.
  */
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -146,7 +147,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
        return idx;
 }
 
-int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
+static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS];
        int type, idx;
@@ -167,7 +168,7 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
        return -ENOENT;
 }
 
-int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
+static int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS];
        int type, idx;
@@ -234,7 +235,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
                cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
 }
 
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS];
        int ret, idx;
@@ -245,6 +246,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
                if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
                        continue;
 
+               /* if vid passed is -1 then remove all multicast entry from
+                * the table irrespective of vlan id, if a valid vlan id is
+                * passed then remove only multicast added to that vlan id.
+                * if vlan id doesn't match then move on to next entry.
+                */
+               if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+                       continue;
+
                if (cpsw_ale_get_mcast(ale_entry)) {
                        u8 addr[6];
 
@@ -257,6 +266,7 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
 
 static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
                                 int port_mask)
@@ -289,6 +299,7 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_flush);
 
 static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
                                                int flags, u16 vid)
@@ -326,6 +337,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
        cpsw_ale_write(ale, idx, ale_entry);
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
 
 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
                       int flags, u16 vid)
@@ -341,6 +353,7 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
        cpsw_ale_write(ale, idx, ale_entry);
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
 
 int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
                       int flags, u16 vid, int mcast_state)
@@ -372,6 +385,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
        cpsw_ale_write(ale, idx, ale_entry);
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
 
 int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
                       int flags, u16 vid)
@@ -393,6 +407,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
        cpsw_ale_write(ale, idx, ale_entry);
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
 
 int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
                      int reg_mcast, int unreg_mcast)
@@ -422,6 +437,7 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
        cpsw_ale_write(ale, idx, ale_entry);
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_add_vlan);
 
 int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
 {
@@ -442,6 +458,7 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
        cpsw_ale_write(ale, idx, ale_entry);
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan);
 
 void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
 {
@@ -471,6 +488,7 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
                cpsw_ale_write(ale, idx, ale_entry);
        }
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_set_allmulti);
 
 struct ale_control_info {
        const char      *name;
@@ -696,6 +714,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_control_set);
 
 int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
 {
@@ -719,6 +738,7 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
        tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
        return tmp & BITMASK(info->bits);
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
 
 static void cpsw_ale_timer(unsigned long arg)
 {
@@ -742,6 +762,7 @@ int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
 
 void cpsw_ale_start(struct cpsw_ale *ale)
 {
@@ -761,11 +782,13 @@ void cpsw_ale_start(struct cpsw_ale *ale)
                add_timer(&ale->timer);
        }
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_start);
 
 void cpsw_ale_stop(struct cpsw_ale *ale)
 {
        del_timer_sync(&ale->timer);
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_stop);
 
 struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
 {
@@ -780,6 +803,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
 
        return ale;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_create);
 
 int cpsw_ale_destroy(struct cpsw_ale *ale)
 {
@@ -789,6 +813,7 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
        kfree(ale);
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
 
 void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
 {
@@ -799,3 +824,8 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
                data += ALE_ENTRY_WORDS;
        }
 }
+EXPORT_SYMBOL_GPL(cpsw_ale_dump);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI CPSW ALE driver");
+MODULE_AUTHOR("Texas Instruments");
index c0d4127aa549285c7e50e47214c1579e17478210..af1e7ecd87c6fbd24b80954c7977e96aa3676a0c 100644 (file)
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
 
 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
                       int flags, u16 vid);
 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
index ea712512c7d1f5e155129bf073cac1d7381d38bb..aeebc0a7bf4769aa5b18fe03cec9f4a7a4cdd9d6 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/semaphore.h>
 #include <linux/phy.h>
 #include <linux/bitops.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_mdio.h>
 #include <linux/of_irq.h>
 #include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
 
 #include <asm/irq.h>
 #include <asm/page.h>
 
+#include "cpsw.h"
 #include "davinci_cpdma.h"
 
 static int debug_level;
@@ -343,9 +347,7 @@ struct emac_priv {
        u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
        u32 rx_addr_type;
        const char *phy_id;
-#ifdef CONFIG_OF
        struct device_node *phy_node;
-#endif
        struct phy_device *phydev;
        spinlock_t lock;
        /*platform specific members*/
@@ -922,6 +924,16 @@ static void emac_int_disable(struct emac_priv *priv)
                if (priv->int_disable)
                        priv->int_disable();
 
+               /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+               /* ack rxen only then a new pulse will be generated */
+               emac_write(EMAC_DM646X_MACEOIVECTOR,
+                       EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+               /* ack txen- only then a new pulse will be generated */
+               emac_write(EMAC_DM646X_MACEOIVECTOR,
+                       EMAC_DM646X_MAC_EOI_C0_TXEN);
+
                local_irq_restore(flags);
 
        } else {
@@ -951,15 +963,6 @@ static void emac_int_enable(struct emac_priv *priv)
                 * register */
 
                /* NOTE: Rx Threshold and Misc interrupts are not enabled */
-
-               /* ack rxen only then a new pulse will be generated */
-               emac_write(EMAC_DM646X_MACEOIVECTOR,
-                       EMAC_DM646X_MAC_EOI_C0_RXEN);
-
-               /* ack txen- only then a new pulse will be generated */
-               emac_write(EMAC_DM646X_MACEOIVECTOR,
-                       EMAC_DM646X_MAC_EOI_C0_TXEN);
-
        } else {
                /* Set DM644x control registers for interrupt control */
                emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
@@ -1537,7 +1540,13 @@ static int emac_dev_open(struct net_device *ndev)
        int i = 0;
        struct emac_priv *priv = netdev_priv(ndev);
 
-       pm_runtime_get(&priv->pdev->dev);
+       ret = pm_runtime_get_sync(&priv->pdev->dev);
+       if (ret < 0) {
+               pm_runtime_put_noidle(&priv->pdev->dev);
+               dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+                       __func__, ret);
+               return ret;
+       }
 
        netif_carrier_off(ndev);
        for (cnt = 0; cnt < ETH_ALEN; cnt++)
@@ -1596,8 +1605,20 @@ static int emac_dev_open(struct net_device *ndev)
        cpdma_ctlr_start(priv->dma);
 
        priv->phydev = NULL;
+
+       if (priv->phy_node) {
+               priv->phydev = of_phy_connect(ndev, priv->phy_node,
+                                             &emac_adjust_link, 0, 0);
+               if (!priv->phydev) {
+                       dev_err(emac_dev, "could not connect to phy %s\n",
+                               priv->phy_node->full_name);
+                       ret = -ENODEV;
+                       goto err;
+               }
+       }
+
        /* use the first phy on the bus if pdata did not give us a phy id */
-       if (!priv->phy_id) {
+       if (!priv->phydev && !priv->phy_id) {
                struct device *phy;
 
                phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1606,7 +1627,7 @@ static int emac_dev_open(struct net_device *ndev)
                        priv->phy_id = dev_name(phy);
        }
 
-       if (priv->phy_id && *priv->phy_id) {
+       if (!priv->phydev && priv->phy_id && *priv->phy_id) {
                priv->phydev = phy_connect(ndev, priv->phy_id,
                                           &emac_adjust_link,
                                           PHY_INTERFACE_MODE_MII);
@@ -1627,7 +1648,9 @@ static int emac_dev_open(struct net_device *ndev)
                        "(mii_bus:phy_addr=%s, id=%x)\n",
                        priv->phydev->drv->name, dev_name(&priv->phydev->dev),
                        priv->phydev->phy_id);
-       } else {
+       }
+
+       if (!priv->phydev) {
                /* No PHY , fix the link, speed and duplex settings */
                dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
                priv->link = 1;
@@ -1724,6 +1747,15 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
        struct emac_priv *priv = netdev_priv(ndev);
        u32 mac_control;
        u32 stats_clear_mask;
+       int err;
+
+       err = pm_runtime_get_sync(&priv->pdev->dev);
+       if (err < 0) {
+               pm_runtime_put_noidle(&priv->pdev->dev);
+               dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+                       __func__, err);
+               return &ndev->stats;
+       }
 
        /* update emac hardware stats and reset the registers*/
 
@@ -1766,6 +1798,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
        ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
        emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
 
+       pm_runtime_put(&priv->pdev->dev);
+
        return &ndev->stats;
 }
 
@@ -1807,7 +1841,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
        if (!is_valid_ether_addr(pdata->mac_addr)) {
                mac_addr = of_get_mac_address(np);
                if (mac_addr)
-                       memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+                       ether_addr_copy(pdata->mac_addr, mac_addr);
        }
 
        of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
@@ -1848,6 +1882,53 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
        return  pdata;
 }
 
+static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
+                                      int slave, u8 *mac_addr)
+{
+       u32 macid_lsb;
+       u32 macid_msb;
+       struct regmap *syscon;
+
+       syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+       if (IS_ERR(syscon)) {
+               if (PTR_ERR(syscon) == -ENODEV)
+                       return 0;
+               return PTR_ERR(syscon);
+       }
+
+       regmap_read(syscon, offset, &macid_lsb);
+       regmap_read(syscon, offset + 4, &macid_msb);
+
+       mac_addr[0] = (macid_msb >> 16) & 0xff;
+       mac_addr[1] = (macid_msb >> 8)  & 0xff;
+       mac_addr[2] = macid_msb & 0xff;
+       mac_addr[3] = (macid_lsb >> 16) & 0xff;
+       mac_addr[4] = (macid_lsb >> 8)  & 0xff;
+       mac_addr[5] = macid_lsb & 0xff;
+
+       return 0;
+}
+
+static int davinci_emac_try_get_mac(struct platform_device *pdev,
+                                   int instance, u8 *mac_addr)
+{
+       int error = -EINVAL;
+
+       if (!pdev->dev.of_node)
+               return error;
+
+       if (of_device_is_compatible(pdev->dev.of_node, "ti,am3517-emac"))
+               error = davinci_emac_3517_get_macid(&pdev->dev, 0x110,
+                                                   0, mac_addr);
+       else if (of_device_is_compatible(pdev->dev.of_node,
+                                        "ti,dm816-emac"))
+               error = cpsw_am33xx_cm_get_macid(&pdev->dev, 0x30,
+                                                instance,
+                                                mac_addr);
+
+       return error;
+}
+
 /**
  * davinci_emac_probe - EMAC device probe
  * @pdev: The DaVinci EMAC device that we are removing
@@ -1859,7 +1940,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
 static int davinci_emac_probe(struct platform_device *pdev)
 {
        int rc = 0;
-       struct resource *res;
+       struct resource *res, *res_ctrl;
        struct net_device *ndev;
        struct emac_priv *priv;
        unsigned long hw_ram_addr;
@@ -1876,6 +1957,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
                return -EBUSY;
        }
        emac_bus_frequency = clk_get_rate(emac_clk);
+       devm_clk_put(&pdev->dev, emac_clk);
 
        /* TODO: Probe PHY here if possible */
 
@@ -1917,11 +1999,20 @@ static int davinci_emac_probe(struct platform_device *pdev)
                rc = PTR_ERR(priv->remap_addr);
                goto no_pdata;
        }
+
+       res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (res_ctrl) {
+               priv->ctrl_base =
+                       devm_ioremap_resource(&pdev->dev, res_ctrl);
+               if (IS_ERR(priv->ctrl_base))
+                       goto no_pdata;
+       } else {
+               priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+       }
+
        priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
        ndev->base_addr = (unsigned long)priv->remap_addr;
 
-       priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
-
        hw_ram_addr = pdata->hw_ram_addr;
        if (!hw_ram_addr)
                hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
@@ -1968,6 +2059,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
        }
        ndev->irq = res->start;
 
+       rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
+       if (!rc)
+               ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+
        if (!is_valid_ether_addr(priv->mac_addr)) {
                /* Use random MAC if none passed */
                eth_hw_addr_random(ndev);
@@ -1980,12 +2075,22 @@ static int davinci_emac_probe(struct platform_device *pdev)
        ndev->ethtool_ops = &ethtool_ops;
        netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
 
+       pm_runtime_enable(&pdev->dev);
+       rc = pm_runtime_get_sync(&pdev->dev);
+       if (rc < 0) {
+               pm_runtime_put_noidle(&pdev->dev);
+               dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
+                       __func__, rc);
+               goto no_cpdma_chan;
+       }
+
        /* register the network device */
        SET_NETDEV_DEV(ndev, &pdev->dev);
        rc = register_netdev(ndev);
        if (rc) {
                dev_err(&pdev->dev, "error in register_netdev\n");
                rc = -ENODEV;
+               pm_runtime_put(&pdev->dev);
                goto no_cpdma_chan;
        }
 
@@ -1995,9 +2100,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
                           "(regs: %p, irq: %d)\n",
                           (void *)priv->emac_base_phys, ndev->irq);
        }
-
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_resume(&pdev->dev);
+       pm_runtime_put(&pdev->dev);
 
        return 0;
 
@@ -2071,9 +2174,14 @@ static const struct emac_platform_data am3517_emac_data = {
        .hw_ram_addr            = 0x01e20000,
 };
 
+static const struct emac_platform_data dm816_emac_data = {
+       .version                = EMAC_VERSION_2,
+};
+
 static const struct of_device_id davinci_emac_of_match[] = {
        {.compatible = "ti,davinci-dm6467-emac", },
        {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
+       {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, },
        {},
 };
 MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
new file mode 100644 (file)
index 0000000..906e9bc
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * NetCP driver local header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:    Sandeep Nair <sandeep_n@ti.com>
+ *             Sandeep Paulraj <s-paulraj@ti.com>
+ *             Cyril Chemparathy <cyril@ti.com>
+ *             Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *             Wingman Kwok <w-kwok2@ti.com>
+ *             Murali Karicheri <m-karicheri2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __NETCP_H__
+#define __NETCP_H__
+
+#include <linux/netdevice.h>
+#include <linux/soc/ti/knav_dma.h>
+
+/* Maximum Ethernet frame size supported by Keystone switch */
+#define NETCP_MAX_FRAME_SIZE           9504
+
+#define SGMII_LINK_MAC_MAC_AUTONEG     0
+#define SGMII_LINK_MAC_PHY             1
+#define SGMII_LINK_MAC_MAC_FORCED      2
+#define SGMII_LINK_MAC_FIBER           3
+#define SGMII_LINK_MAC_PHY_NO_MDIO     4
+#define XGMII_LINK_MAC_PHY             10
+#define XGMII_LINK_MAC_MAC_FORCED      11
+
+struct netcp_device;
+
+struct netcp_tx_pipe {
+       struct netcp_device     *netcp_device;
+       void                    *dma_queue;
+       unsigned int            dma_queue_id;
+       u8                      dma_psflags;
+       void                    *dma_channel;
+       const char              *dma_chan_name;
+};
+
+#define ADDR_NEW                       BIT(0)
+#define ADDR_VALID                     BIT(1)
+
+enum netcp_addr_type {
+       ADDR_ANY,
+       ADDR_DEV,
+       ADDR_UCAST,
+       ADDR_MCAST,
+       ADDR_BCAST
+};
+
+struct netcp_addr {
+       struct netcp_intf       *netcp;
+       unsigned char           addr[ETH_ALEN];
+       enum netcp_addr_type    type;
+       unsigned int            flags;
+       struct list_head        node;
+};
+
+struct netcp_intf {
+       struct device           *dev;
+       struct device           *ndev_dev;
+       struct net_device       *ndev;
+       bool                    big_endian;
+       unsigned int            tx_compl_qid;
+       void                    *tx_pool;
+       struct list_head        txhook_list_head;
+       unsigned int            tx_pause_threshold;
+       void                    *tx_compl_q;
+
+       unsigned int            tx_resume_threshold;
+       void                    *rx_queue;
+       void                    *rx_pool;
+       struct list_head        rxhook_list_head;
+       unsigned int            rx_queue_id;
+       void                    *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
+       u32                     rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
+       struct napi_struct      rx_napi;
+       struct napi_struct      tx_napi;
+
+       void                    *rx_channel;
+       const char              *dma_chan_name;
+       u32                     rx_pool_size;
+       u32                     rx_pool_region_id;
+       u32                     tx_pool_size;
+       u32                     tx_pool_region_id;
+       struct list_head        module_head;
+       struct list_head        interface_list;
+       struct list_head        addr_list;
+       bool                    netdev_registered;
+       bool                    primary_module_attached;
+
+       /* Lock used for protecting Rx/Tx hook list management */
+       spinlock_t              lock;
+       struct netcp_device     *netcp_device;
+       struct device_node      *node_interface;
+
+       /* DMA configuration data */
+       u32                     msg_enable;
+       u32                     rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN];
+};
+
+#define        NETCP_PSDATA_LEN                KNAV_DMA_NUM_PS_WORDS
+struct netcp_packet {
+       struct sk_buff          *skb;
+       u32                     *epib;
+       u32                     *psdata;
+       unsigned int            psdata_len;
+       struct netcp_intf       *netcp;
+       struct netcp_tx_pipe    *tx_pipe;
+       bool                    rxtstamp_complete;
+       void                    *ts_context;
+
+       int     (*txtstamp_complete)(void *ctx, struct netcp_packet *pkt);
+};
+
+static inline u32 *netcp_push_psdata(struct netcp_packet *p_info,
+                                    unsigned int bytes)
+{
+       u32 *buf;
+       unsigned int words;
+
+       if ((bytes & 0x03) != 0)
+               return NULL;
+       words = bytes >> 2;
+
+       if ((p_info->psdata_len + words) > NETCP_PSDATA_LEN)
+               return NULL;
+
+       p_info->psdata_len += words;
+       buf = &p_info->psdata[NETCP_PSDATA_LEN - p_info->psdata_len];
+       return buf;
+}
+
+static inline int netcp_align_psdata(struct netcp_packet *p_info,
+                                    unsigned int byte_align)
+{
+       int padding;
+
+       switch (byte_align) {
+       case 0:
+               padding = -EINVAL;
+               break;
+       case 1:
+       case 2:
+       case 4:
+               padding = 0;
+               break;
+       case 8:
+               padding = (p_info->psdata_len << 2) % 8;
+               break;
+       case 16:
+               padding = (p_info->psdata_len << 2) % 16;
+               break;
+       default:
+               padding = (p_info->psdata_len << 2) % byte_align;
+               break;
+       }
+       return padding;
+}
+
+struct netcp_module {
+       const char              *name;
+       struct module           *owner;
+       bool                    primary;
+
+       /* probe/remove: called once per NETCP instance */
+       int     (*probe)(struct netcp_device *netcp_device,
+                        struct device *device, struct device_node *node,
+                        void **inst_priv);
+       int     (*remove)(struct netcp_device *netcp_device, void *inst_priv);
+
+       /* attach/release: called once per network interface */
+       int     (*attach)(void *inst_priv, struct net_device *ndev,
+                         struct device_node *node, void **intf_priv);
+       int     (*release)(void *intf_priv);
+       int     (*open)(void *intf_priv, struct net_device *ndev);
+       int     (*close)(void *intf_priv, struct net_device *ndev);
+       int     (*add_addr)(void *intf_priv, struct netcp_addr *naddr);
+       int     (*del_addr)(void *intf_priv, struct netcp_addr *naddr);
+       int     (*add_vid)(void *intf_priv, int vid);
+       int     (*del_vid)(void *intf_priv, int vid);
+       int     (*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
+
+       /* used internally */
+       struct list_head        module_list;
+       struct list_head        interface_list;
+};
+
+int netcp_register_module(struct netcp_module *module);
+void netcp_unregister_module(struct netcp_module *module);
+void *netcp_module_get_intf_data(struct netcp_module *module,
+                                struct netcp_intf *intf);
+
+int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
+                     struct netcp_device *netcp_device,
+                     const char *dma_chan_name, unsigned int dma_queue_id);
+int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe);
+int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe);
+
+typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet);
+int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
+                         netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
+                           netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
+                         netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
+                           netcp_hook_rtn *hook_rtn, void *hook_data);
+void *netcp_device_find_module(struct netcp_device *netcp_device,
+                              const char *name);
+
+/* SGMII functions */
+int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
+int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
+
+/* XGBE SERDES init functions */
+int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs);
+
+#endif /* __NETCP_H__ */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
new file mode 100644 (file)
index 0000000..a31a8c3
--- /dev/null
@@ -0,0 +1,2149 @@
+/*
+ * Keystone NetCP Core driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:    Sandeep Nair <sandeep_n@ti.com>
+ *             Sandeep Paulraj <s-paulraj@ti.com>
+ *             Cyril Chemparathy <cyril@ti.com>
+ *             Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *             Murali Karicheri <m-karicheri2@ti.com>
+ *             Wingman Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/if_vlan.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/knav_qmss.h>
+#include <linux/soc/ti/knav_dma.h>
+
+#include "netcp.h"
+
+#define NETCP_SOP_OFFSET       (NET_IP_ALIGN + NET_SKB_PAD)
+#define NETCP_NAPI_WEIGHT      64
+#define NETCP_TX_TIMEOUT       (5 * HZ)
+#define NETCP_MIN_PACKET_SIZE  ETH_ZLEN
+#define NETCP_MAX_MCAST_ADDR   16
+
+#define NETCP_EFUSE_REG_INDEX  0
+
+#define NETCP_MOD_PROBE_SKIPPED        1
+#define NETCP_MOD_PROBE_FAILED 2
+
+#define NETCP_DEBUG (NETIF_MSG_HW      | NETIF_MSG_WOL         |       \
+                   NETIF_MSG_DRV       | NETIF_MSG_LINK        |       \
+                   NETIF_MSG_IFUP      | NETIF_MSG_INTR        |       \
+                   NETIF_MSG_PROBE     | NETIF_MSG_TIMER       |       \
+                   NETIF_MSG_IFDOWN    | NETIF_MSG_RX_ERR      |       \
+                   NETIF_MSG_TX_ERR    | NETIF_MSG_TX_DONE     |       \
+                   NETIF_MSG_PKTDATA   | NETIF_MSG_TX_QUEUED   |       \
+                   NETIF_MSG_RX_STATUS)
+
+#define knav_queue_get_id(q)   knav_queue_device_control(q, \
+                               KNAV_QUEUE_GET_ID, (unsigned long)NULL)
+
+#define knav_queue_enable_notify(q) knav_queue_device_control(q,       \
+                                       KNAV_QUEUE_ENABLE_NOTIFY,       \
+                                       (unsigned long)NULL)
+
+#define knav_queue_disable_notify(q) knav_queue_device_control(q,      \
+                                       KNAV_QUEUE_DISABLE_NOTIFY,      \
+                                       (unsigned long)NULL)
+
+#define knav_queue_get_count(q)        knav_queue_device_control(q, \
+                               KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
+
+#define for_each_netcp_module(module)                  \
+       list_for_each_entry(module, &netcp_modules, module_list)
+
+#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
+       list_for_each_entry(inst_modpriv, \
+               &((netcp_device)->modpriv_head), inst_list)
+
+#define for_each_module(netcp, intf_modpriv)                   \
+       list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
+
+/* Module management structures */
+struct netcp_device {
+       struct list_head        device_list;
+       struct list_head        interface_head;
+       struct list_head        modpriv_head;
+       struct device           *device;
+};
+
+struct netcp_inst_modpriv {
+       struct netcp_device     *netcp_device;
+       struct netcp_module     *netcp_module;
+       struct list_head        inst_list;
+       void                    *module_priv;
+};
+
+struct netcp_intf_modpriv {
+       struct netcp_intf       *netcp_priv;
+       struct netcp_module     *netcp_module;
+       struct list_head        intf_list;
+       void                    *module_priv;
+};
+
+static LIST_HEAD(netcp_devices);
+static LIST_HEAD(netcp_modules);
+static DEFINE_MUTEX(netcp_modules_lock);
+
+static int netcp_debug_level = -1;
+module_param(netcp_debug_level, int, 0);
+MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
+
+/* Helper functions - Get/Set */
+static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
+                        struct knav_dma_desc *desc)
+{
+       *buff_len = desc->buff_len;
+       *buff = desc->buff;
+       *ndesc = desc->next_desc;
+}
+
+static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
+{
+       *pad0 = desc->pad[0];
+       *pad1 = desc->pad[1];
+}
+
+static void get_org_pkt_info(u32 *buff, u32 *buff_len,
+                            struct knav_dma_desc *desc)
+{
+       *buff = desc->orig_buff;
+       *buff_len = desc->orig_len;
+}
+
+static void get_words(u32 *words, int num_words, u32 *desc)
+{
+       int i;
+
+       for (i = 0; i < num_words; i++)
+               words[i] = desc[i];
+}
+
+static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
+                        struct knav_dma_desc *desc)
+{
+       desc->buff_len = buff_len;
+       desc->buff = buff;
+       desc->next_desc = ndesc;
+}
+
+static void set_desc_info(u32 desc_info, u32 pkt_info,
+                         struct knav_dma_desc *desc)
+{
+       desc->desc_info = desc_info;
+       desc->packet_info = pkt_info;
+}
+
+static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
+{
+       desc->pad[0] = pad0;
+       desc->pad[1] = pad1;
+}
+
+static void set_org_pkt_info(u32 buff, u32 buff_len,
+                            struct knav_dma_desc *desc)
+{
+       desc->orig_buff = buff;
+       desc->orig_len = buff_len;
+}
+
+static void set_words(u32 *words, int num_words, u32 *desc)
+{
+       int i;
+
+       for (i = 0; i < num_words; i++)
+               desc[i] = words[i];
+}
+
+/* Read the e-fuse value as 32 bit values to be endian independent */
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+{
+       unsigned int addr0, addr1;
+
+       addr1 = readl(efuse_mac + 4);
+       addr0 = readl(efuse_mac);
+
+       x[0] = (addr1 & 0x0000ff00) >> 8;
+       x[1] = addr1 & 0x000000ff;
+       x[2] = (addr0 & 0xff000000) >> 24;
+       x[3] = (addr0 & 0x00ff0000) >> 16;
+       x[4] = (addr0 & 0x0000ff00) >> 8;
+       x[5] = addr0 & 0x000000ff;
+
+       return 0;
+}
+
+static const char *netcp_node_name(struct device_node *node)
+{
+       const char *name;
+
+       if (of_property_read_string(node, "label", &name) < 0)
+               name = node->name;
+       if (!name)
+               name = "unknown";
+       return name;
+}
+
+/* Module management routines */
+static int netcp_register_interface(struct netcp_intf *netcp)
+{
+       int ret;
+
+       ret = register_netdev(netcp->ndev);
+       if (!ret)
+               netcp->netdev_registered = true;
+       return ret;
+}
+
+static int netcp_module_probe(struct netcp_device *netcp_device,
+                             struct netcp_module *module)
+{
+       struct device *dev = netcp_device->device;
+       struct device_node *devices, *interface, *node = dev->of_node;
+       struct device_node *child;
+       struct netcp_inst_modpriv *inst_modpriv;
+       struct netcp_intf *netcp_intf;
+       struct netcp_module *tmp;
+       bool primary_module_registered = false;
+       int ret;
+
+       /* Find this module in the sub-tree for this device */
+       devices = of_get_child_by_name(node, "netcp-devices");
+       if (!devices) {
+               dev_err(dev, "could not find netcp-devices node\n");
+               return NETCP_MOD_PROBE_SKIPPED;
+       }
+
+       for_each_available_child_of_node(devices, child) {
+               const char *name = netcp_node_name(child);
+
+               if (!strcasecmp(module->name, name))
+                       break;
+       }
+
+       of_node_put(devices);
+       /* If module not used for this device, skip it */
+       if (!child) {
+               dev_warn(dev, "module(%s) not used for device\n", module->name);
+               return NETCP_MOD_PROBE_SKIPPED;
+       }
+
+       inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
+       if (!inst_modpriv) {
+               of_node_put(child);
+               return -ENOMEM;
+       }
+
+       inst_modpriv->netcp_device = netcp_device;
+       inst_modpriv->netcp_module = module;
+       list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);
+
+       ret = module->probe(netcp_device, dev, child,
+                           &inst_modpriv->module_priv);
+       of_node_put(child);
+       if (ret) {
+               dev_err(dev, "Probe of module(%s) failed with %d\n",
+                       module->name, ret);
+               list_del(&inst_modpriv->inst_list);
+               devm_kfree(dev, inst_modpriv);
+               return NETCP_MOD_PROBE_FAILED;
+       }
+
+       /* Attach modules only if the primary module is probed */
+       for_each_netcp_module(tmp) {
+               if (tmp->primary)
+                       primary_module_registered = true;
+       }
+
+       if (!primary_module_registered)
+               return 0;
+
+       /* Attach module to interfaces */
+       list_for_each_entry(netcp_intf, &netcp_device->interface_head,
+                           interface_list) {
+               struct netcp_intf_modpriv *intf_modpriv;
+
+               /* If interface not registered then register now */
+               if (!netcp_intf->netdev_registered)
+                       ret = netcp_register_interface(netcp_intf);
+
+               if (ret)
+                       return -ENODEV;
+
+               intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
+                                           GFP_KERNEL);
+               if (!intf_modpriv)
+                       return -ENOMEM;
+
+               interface = of_parse_phandle(netcp_intf->node_interface,
+                                            module->name, 0);
+
+               intf_modpriv->netcp_priv = netcp_intf;
+               intf_modpriv->netcp_module = module;
+               list_add_tail(&intf_modpriv->intf_list,
+                             &netcp_intf->module_head);
+
+               ret = module->attach(inst_modpriv->module_priv,
+                                    netcp_intf->ndev, interface,
+                                    &intf_modpriv->module_priv);
+               of_node_put(interface);
+               if (ret) {
+                       dev_dbg(dev, "Attach of module %s declined with %d\n",
+                               module->name, ret);
+                       list_del(&intf_modpriv->intf_list);
+                       devm_kfree(dev, intf_modpriv);
+                       continue;
+               }
+       }
+       return 0;
+}
+
+int netcp_register_module(struct netcp_module *module)
+{
+       struct netcp_device *netcp_device;
+       struct netcp_module *tmp;
+       int ret;
+
+       if (!module->name) {
+               WARN(1, "error registering netcp module: no name\n");
+               return -EINVAL;
+       }
+
+       if (!module->probe) {
+               WARN(1, "error registering netcp module: no probe\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&netcp_modules_lock);
+
+       for_each_netcp_module(tmp) {
+               if (!strcasecmp(tmp->name, module->name)) {
+                       mutex_unlock(&netcp_modules_lock);
+                       return -EEXIST;
+               }
+       }
+       list_add_tail(&module->module_list, &netcp_modules);
+
+       list_for_each_entry(netcp_device, &netcp_devices, device_list) {
+               ret = netcp_module_probe(netcp_device, module);
+               if (ret < 0)
+                       goto fail;
+       }
+
+       mutex_unlock(&netcp_modules_lock);
+       return 0;
+
+fail:
+       mutex_unlock(&netcp_modules_lock);
+       netcp_unregister_module(module);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(netcp_register_module);
+
+static void netcp_release_module(struct netcp_device *netcp_device,
+                                struct netcp_module *module)
+{
+       struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
+       struct netcp_intf *netcp_intf, *netcp_tmp;
+       struct device *dev = netcp_device->device;
+
+       /* Release the module from each interface */
+       list_for_each_entry_safe(netcp_intf, netcp_tmp,
+                                &netcp_device->interface_head,
+                                interface_list) {
+               struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;
+
+               list_for_each_entry_safe(intf_modpriv, intf_tmp,
+                                        &netcp_intf->module_head,
+                                        intf_list) {
+                       if (intf_modpriv->netcp_module == module) {
+                               module->release(intf_modpriv->module_priv);
+                               list_del(&intf_modpriv->intf_list);
+                               devm_kfree(dev, intf_modpriv);
+                               break;
+                       }
+               }
+       }
+
+       /* Remove the module from each instance */
+       list_for_each_entry_safe(inst_modpriv, inst_tmp,
+                                &netcp_device->modpriv_head, inst_list) {
+               if (inst_modpriv->netcp_module == module) {
+                       module->remove(netcp_device,
+                                      inst_modpriv->module_priv);
+                       list_del(&inst_modpriv->inst_list);
+                       devm_kfree(dev, inst_modpriv);
+                       break;
+               }
+       }
+}
+
+void netcp_unregister_module(struct netcp_module *module)
+{
+       struct netcp_device *netcp_device;
+       struct netcp_module *module_tmp;
+
+       mutex_lock(&netcp_modules_lock);
+
+       list_for_each_entry(netcp_device, &netcp_devices, device_list) {
+               netcp_release_module(netcp_device, module);
+       }
+
+       /* Remove the module from the module list */
+       for_each_netcp_module(module_tmp) {
+               if (module == module_tmp) {
+                       list_del(&module->module_list);
+                       break;
+               }
+       }
+
+       mutex_unlock(&netcp_modules_lock);
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_module);
+
+void *netcp_module_get_intf_data(struct netcp_module *module,
+                                struct netcp_intf *intf)
+{
+       struct netcp_intf_modpriv *intf_modpriv;
+
+       list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
+               if (intf_modpriv->netcp_module == module)
+                       return intf_modpriv->module_priv;
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
+
+/* Module TX and RX Hook management */
+struct netcp_hook_list {
+       struct list_head         list;
+       netcp_hook_rtn          *hook_rtn;
+       void                    *hook_data;
+       int                      order;
+};
+
+int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
+                         netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+       struct netcp_hook_list *entry;
+       struct netcp_hook_list *next;
+       unsigned long flags;
+
+       entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->hook_rtn  = hook_rtn;
+       entry->hook_data = hook_data;
+       entry->order     = order;
+
+       spin_lock_irqsave(&netcp_priv->lock, flags);
+       list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
+               if (next->order > order)
+                       break;
+       }
+       __list_add(&entry->list, next->list.prev, &next->list);
+       spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_register_txhook);
+
+int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
+                           netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+       struct netcp_hook_list *next, *n;
+       unsigned long flags;
+
+       spin_lock_irqsave(&netcp_priv->lock, flags);
+       list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
+               if ((next->order     == order) &&
+                   (next->hook_rtn  == hook_rtn) &&
+                   (next->hook_data == hook_data)) {
+                       list_del(&next->list);
+                       spin_unlock_irqrestore(&netcp_priv->lock, flags);
+                       devm_kfree(netcp_priv->dev, next);
+                       return 0;
+               }
+       }
+       spin_unlock_irqrestore(&netcp_priv->lock, flags);
+       return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
+
+int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
+                         netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+       struct netcp_hook_list *entry;
+       struct netcp_hook_list *next;
+       unsigned long flags;
+
+       entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->hook_rtn  = hook_rtn;
+       entry->hook_data = hook_data;
+       entry->order     = order;
+
+       spin_lock_irqsave(&netcp_priv->lock, flags);
+       list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
+               if (next->order > order)
+                       break;
+       }
+       __list_add(&entry->list, next->list.prev, &next->list);
+       spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+       return 0;
+}
+
+int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
+                           netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+       struct netcp_hook_list *next, *n;
+       unsigned long flags;
+
+       spin_lock_irqsave(&netcp_priv->lock, flags);
+       list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
+               if ((next->order     == order) &&
+                   (next->hook_rtn  == hook_rtn) &&
+                   (next->hook_data == hook_data)) {
+                       list_del(&next->list);
+                       spin_unlock_irqrestore(&netcp_priv->lock, flags);
+                       devm_kfree(netcp_priv->dev, next);
+                       return 0;
+               }
+       }
+       spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+       return -ENOENT;
+}
+
+static void netcp_frag_free(bool is_frag, void *ptr)
+{
+       if (is_frag)
+               put_page(virt_to_head_page(ptr));
+       else
+               kfree(ptr);
+}
+
+static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
+                                    struct knav_dma_desc *desc)
+{
+       struct knav_dma_desc *ndesc;
+       dma_addr_t dma_desc, dma_buf;
+       unsigned int buf_len, dma_sz = sizeof(*ndesc);
+       void *buf_ptr;
+       u32 tmp;
+
+       get_words(&dma_desc, 1, &desc->next_desc);
+
+       while (dma_desc) {
+               ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+               if (unlikely(!ndesc)) {
+                       dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+                       break;
+               }
+               get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
+               get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
+               dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
+               __free_page(buf_ptr);
+               knav_pool_desc_put(netcp->rx_pool, desc);
+       }
+
+       get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
+       if (buf_ptr)
+               netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
+       knav_pool_desc_put(netcp->rx_pool, desc);
+}
+
+static void netcp_empty_rx_queue(struct netcp_intf *netcp)
+{
+       struct knav_dma_desc *desc;
+       unsigned int dma_sz;
+       dma_addr_t dma;
+
+       for (; ;) {
+               dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
+               if (!dma)
+                       break;
+
+               desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
+               if (unlikely(!desc)) {
+                       dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
+                               __func__);
+                       netcp->ndev->stats.rx_errors++;
+                       continue;
+               }
+               netcp_free_rx_desc_chain(netcp, desc);
+               netcp->ndev->stats.rx_dropped++;
+       }
+}
+
+static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
+{
+       unsigned int dma_sz, buf_len, org_buf_len;
+       struct knav_dma_desc *desc, *ndesc;
+       unsigned int pkt_sz = 0, accum_sz;
+       struct netcp_hook_list *rx_hook;
+       dma_addr_t dma_desc, dma_buff;
+       struct netcp_packet p_info;
+       struct sk_buff *skb;
+       void *org_buf_ptr;
+       u32 tmp;
+
+       dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
+       if (!dma_desc)
+               return -1;
+
+       desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+       if (unlikely(!desc)) {
+               dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+               return 0;
+       }
+
+       get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
+       get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
+
+       if (unlikely(!org_buf_ptr)) {
+               dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
+               goto free_desc;
+       }
+
+       pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
+       accum_sz = buf_len;
+       dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);
+
+       /* Build a new sk_buff for the primary buffer */
+       skb = build_skb(org_buf_ptr, org_buf_len);
+       if (unlikely(!skb)) {
+               dev_err(netcp->ndev_dev, "build_skb() failed\n");
+               goto free_desc;
+       }
+
+       /* update data, tail and len */
+       skb_reserve(skb, NETCP_SOP_OFFSET);
+       __skb_put(skb, buf_len);
+
+       /* Fill in the page fragment list */
+       while (dma_desc) {
+               struct page *page;
+
+               ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+               if (unlikely(!ndesc)) {
+                       dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+                       goto free_desc;
+               }
+
+               get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
+               get_pad_info((u32 *)&page, &tmp, ndesc);
+
+               if (likely(dma_buff && buf_len && page)) {
+                       dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
+                                      DMA_FROM_DEVICE);
+               } else {
+                       dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
+                               (void *)dma_buff, buf_len, page);
+                       goto free_desc;
+               }
+
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                               offset_in_page(dma_buff), buf_len, PAGE_SIZE);
+               accum_sz += buf_len;
+
+               /* Free the descriptor */
+               knav_pool_desc_put(netcp->rx_pool, ndesc);
+       }
+
+       /* Free the primary descriptor */
+       knav_pool_desc_put(netcp->rx_pool, desc);
+
+       /* check for packet len and warn */
+       if (unlikely(pkt_sz != accum_sz))
+               dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
+                       pkt_sz, accum_sz);
+
+       /* Remove ethernet FCS from the packet */
+       __pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
+       /* Call each of the RX hooks */
+       p_info.skb = skb;
+       p_info.rxtstamp_complete = false;
+       list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
+               int ret;
+
+               ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
+                                       &p_info);
+               if (unlikely(ret)) {
+                       dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
+                               rx_hook->order, ret);
+                       netcp->ndev->stats.rx_errors++;
+                       dev_kfree_skb(skb);
+                       return 0;
+               }
+       }
+
+       netcp->ndev->last_rx = jiffies;
+       netcp->ndev->stats.rx_packets++;
+       netcp->ndev->stats.rx_bytes += skb->len;
+
+       /* push skb up the stack */
+       skb->protocol = eth_type_trans(skb, netcp->ndev);
+       netif_receive_skb(skb);
+       return 0;
+
+free_desc:
+       netcp_free_rx_desc_chain(netcp, desc);
+       netcp->ndev->stats.rx_errors++;
+       return 0;
+}
+
+static int netcp_process_rx_packets(struct netcp_intf *netcp,
+                                   unsigned int budget)
+{
+       int i;
+
+       for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
+               ;
+       return i;
+}
+
+/* Release descriptors and attached buffers from Rx FDQ */
+static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
+{
+       struct knav_dma_desc *desc;
+       unsigned int buf_len, dma_sz;
+       dma_addr_t dma;
+       void *buf_ptr;
+       u32 tmp;
+
+       /* Allocate descriptor */
+       while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
+               desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
+               if (unlikely(!desc)) {
+                       dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+                       continue;
+               }
+
+               get_org_pkt_info(&dma, &buf_len, desc);
+               get_pad_info((u32 *)&buf_ptr, &tmp, desc);
+
+               if (unlikely(!dma)) {
+                       dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
+                       knav_pool_desc_put(netcp->rx_pool, desc);
+                       continue;
+               }
+
+               if (unlikely(!buf_ptr)) {
+                       dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
+                       knav_pool_desc_put(netcp->rx_pool, desc);
+                       continue;
+               }
+
+               if (fdq == 0) {
+                       dma_unmap_single(netcp->dev, dma, buf_len,
+                                        DMA_FROM_DEVICE);
+                       netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
+               } else {
+                       dma_unmap_page(netcp->dev, dma, buf_len,
+                                      DMA_FROM_DEVICE);
+                       __free_page(buf_ptr);
+               }
+
+               knav_pool_desc_put(netcp->rx_pool, desc);
+       }
+}
+
+static void netcp_rxpool_free(struct netcp_intf *netcp)
+{
+       int i;
+
+       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+            !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
+               netcp_free_rx_buf(netcp, i);
+
+       if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
+               dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
+                       netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));
+
+       knav_pool_destroy(netcp->rx_pool);
+       netcp->rx_pool = NULL;
+}
+
+static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
+{
+       struct knav_dma_desc *hwdesc;
+       unsigned int buf_len, dma_sz;
+       u32 desc_info, pkt_info;
+       struct page *page;
+       dma_addr_t dma;
+       void *bufptr;
+       u32 pad[2];
+
+       /* Allocate descriptor */
+       hwdesc = knav_pool_desc_get(netcp->rx_pool);
+       if (IS_ERR_OR_NULL(hwdesc)) {
+               dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
+               return;
+       }
+
+       if (likely(fdq == 0)) {
+               unsigned int primary_buf_len;
+               /* Allocate a primary receive queue entry */
+               buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+               primary_buf_len = SKB_DATA_ALIGN(buf_len) +
+                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+               if (primary_buf_len <= PAGE_SIZE) {
+                       bufptr = netdev_alloc_frag(primary_buf_len);
+                       pad[1] = primary_buf_len;
+               } else {
+                       bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
+                                        GFP_DMA32 | __GFP_COLD);
+                       pad[1] = 0;
+               }
+
+               if (unlikely(!bufptr)) {
+                       dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+                       goto fail;
+               }
+               dma = dma_map_single(netcp->dev, bufptr, buf_len,
+                                    DMA_TO_DEVICE);
+               pad[0] = (u32)bufptr;
+
+       } else {
+               /* Allocate a secondary receive queue entry */
+               page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+               if (unlikely(!page)) {
+                       dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
+                       goto fail;
+               }
+               buf_len = PAGE_SIZE;
+               dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
+               pad[0] = (u32)page;
+               pad[1] = 0;
+       }
+
+       desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
+       desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
+       pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
+       pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
+       pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
+                   KNAV_DMA_DESC_RETQ_SHIFT;
+       set_org_pkt_info(dma, buf_len, hwdesc);
+       set_pad_info(pad[0], pad[1], hwdesc);
+       set_desc_info(desc_info, pkt_info, hwdesc);
+
+       /* Push to FDQs */
+       knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
+                          &dma_sz);
+       knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
+       return;
+
+fail:
+       knav_pool_desc_put(netcp->rx_pool, hwdesc);
+}
+
+/* Refill Rx FDQ with descriptors & attached buffers */
+static void netcp_rxpool_refill(struct netcp_intf *netcp)
+{
+       u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
+       int i;
+
+       /* Calculate the FDQ deficit and refill */
+       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
+               fdq_deficit[i] = netcp->rx_queue_depths[i] -
+                                knav_queue_get_count(netcp->rx_fdq[i]);
+
+               while (fdq_deficit[i]--)
+                       netcp_allocate_rx_buf(netcp, i);
+       } /* end for fdqs */
+}
+
+/* NAPI poll */
+static int netcp_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
+                                               rx_napi);
+       unsigned int packets;
+
+       packets = netcp_process_rx_packets(netcp, budget);
+
+       if (packets < budget) {
+               napi_complete(&netcp->rx_napi);
+               knav_queue_enable_notify(netcp->rx_queue);
+       }
+
+       netcp_rxpool_refill(netcp);
+       return packets;
+}
+
+static void netcp_rx_notify(void *arg)
+{
+       struct netcp_intf *netcp = arg;
+
+       knav_queue_disable_notify(netcp->rx_queue);
+       napi_schedule(&netcp->rx_napi);
+}
+
+static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
+                                    struct knav_dma_desc *desc,
+                                    unsigned int desc_sz)
+{
+       struct knav_dma_desc *ndesc = desc;
+       dma_addr_t dma_desc, dma_buf;
+       unsigned int buf_len;
+
+       while (ndesc) {
+               get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);
+
+               if (dma_buf && buf_len)
+                       dma_unmap_single(netcp->dev, dma_buf, buf_len,
+                                        DMA_TO_DEVICE);
+               else
+                       dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
+                                (void *)dma_buf, buf_len);
+
+               knav_pool_desc_put(netcp->tx_pool, ndesc);
+               ndesc = NULL;
+               if (dma_desc) {
+                       ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
+                                                    desc_sz);
+                       if (!ndesc)
+                               dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
+               }
+       }
+}
+
+static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
+                                         unsigned int budget)
+{
+       struct knav_dma_desc *desc;
+       struct sk_buff *skb;
+       unsigned int dma_sz;
+       dma_addr_t dma;
+       int pkts = 0;
+       u32 tmp;
+
+       while (budget--) {
+               dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
+               if (!dma)
+                       break;
+               desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
+               if (unlikely(!desc)) {
+                       dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
+                       netcp->ndev->stats.tx_errors++;
+                       continue;
+               }
+
+               get_pad_info((u32 *)&skb, &tmp, desc);
+               netcp_free_tx_desc_chain(netcp, desc, dma_sz);
+               if (!skb) {
+                       dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
+                       netcp->ndev->stats.tx_errors++;
+                       continue;
+               }
+
+               if (netif_subqueue_stopped(netcp->ndev, skb) &&
+                   netif_running(netcp->ndev) &&
+                   (knav_pool_count(netcp->tx_pool) >
+                   netcp->tx_resume_threshold)) {
+                       u16 subqueue = skb_get_queue_mapping(skb);
+
+                       netif_wake_subqueue(netcp->ndev, subqueue);
+               }
+
+               netcp->ndev->stats.tx_packets++;
+               netcp->ndev->stats.tx_bytes += skb->len;
+               dev_kfree_skb(skb);
+               pkts++;
+       }
+       return pkts;
+}
+
+static int netcp_tx_poll(struct napi_struct *napi, int budget)
+{
+       int packets;
+       struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
+                                               tx_napi);
+
+       packets = netcp_process_tx_compl_packets(netcp, budget);
+       if (packets < budget) {
+               napi_complete(&netcp->tx_napi);
+               knav_queue_enable_notify(netcp->tx_compl_q);
+       }
+
+       return packets;
+}
+
+static void netcp_tx_notify(void *arg)
+{
+       struct netcp_intf *netcp = arg;
+
+       knav_queue_disable_notify(netcp->tx_compl_q);
+       napi_schedule(&netcp->tx_napi);
+}
+
+static struct knav_dma_desc*
+netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
+{
+       struct knav_dma_desc *desc, *ndesc, *pdesc;
+       unsigned int pkt_len = skb_headlen(skb);
+       struct device *dev = netcp->dev;
+       dma_addr_t dma_addr;
+       unsigned int dma_sz;
+       int i;
+
+       /* Map the linear buffer */
+       dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
+       if (unlikely(!dma_addr)) {
+               dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
+               return NULL;
+       }
+
+       desc = knav_pool_desc_get(netcp->tx_pool);
+       if (unlikely(IS_ERR_OR_NULL(desc))) {
+               dev_err(netcp->ndev_dev, "out of TX desc\n");
+               dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
+               return NULL;
+       }
+
+       set_pkt_info(dma_addr, pkt_len, 0, desc);
+       if (skb_is_nonlinear(skb)) {
+               prefetchw(skb_shinfo(skb));
+       } else {
+               desc->next_desc = 0;
+               goto upd_pkt_len;
+       }
+
+       pdesc = desc;
+
+       /* Handle the case where skb is fragmented in pages */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               struct page *page = skb_frag_page(frag);
+               u32 page_offset = frag->page_offset;
+               u32 buf_len = skb_frag_size(frag);
+               dma_addr_t desc_dma;
+               u32 pkt_info;
+
+               dma_addr = dma_map_page(dev, page, page_offset, buf_len,
+                                       DMA_TO_DEVICE);
+               if (unlikely(!dma_addr)) {
+                       dev_err(netcp->ndev_dev, "Failed to map skb page\n");
+                       goto free_descs;
+               }
+
+               ndesc = knav_pool_desc_get(netcp->tx_pool);
+               if (unlikely(IS_ERR_OR_NULL(ndesc))) {
+                       dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
+                       dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
+                       goto free_descs;
+               }
+
+               desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
+                                                     (void *)ndesc);
+               pkt_info =
+                       (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
+                               KNAV_DMA_DESC_RETQ_SHIFT;
+               set_pkt_info(dma_addr, buf_len, 0, ndesc);
+               set_words(&desc_dma, 1, &pdesc->next_desc);
+               pkt_len += buf_len;
+               if (pdesc != desc)
+                       knav_pool_desc_map(netcp->tx_pool, pdesc,
+                                          sizeof(*pdesc), &desc_dma, &dma_sz);
+               pdesc = ndesc;
+       }
+       if (pdesc != desc)
+               knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
+                                  &dma_addr, &dma_sz);
+
+       /* frag list based linkage is not supported for now. */
+       if (skb_shinfo(skb)->frag_list) {
+               dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
+               goto free_descs;
+       }
+
+upd_pkt_len:
+       WARN_ON(pkt_len != skb->len);
+
+       pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
+       set_words(&pkt_len, 1, &desc->desc_info);
+       return desc;
+
+free_descs:
+       netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
+       return NULL;
+}
+
+static int netcp_tx_submit_skb(struct netcp_intf *netcp,
+                              struct sk_buff *skb,
+                              struct knav_dma_desc *desc)
+{
+       struct netcp_tx_pipe *tx_pipe = NULL;
+       struct netcp_hook_list *tx_hook;
+       struct netcp_packet p_info;
+       u32 packet_info = 0;
+       unsigned int dma_sz;
+       dma_addr_t dma;
+       int ret = 0;
+
+       p_info.netcp = netcp;
+       p_info.skb = skb;
+       p_info.tx_pipe = NULL;
+       p_info.psdata_len = 0;
+       p_info.ts_context = NULL;
+       p_info.txtstamp_complete = NULL;
+       p_info.epib = desc->epib;
+       p_info.psdata = desc->psdata;
+       memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));
+
+       /* Find out where to inject the packet for transmission */
+       list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
+               ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
+                                       &p_info);
+               if (unlikely(ret != 0)) {
+                       dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
+                               tx_hook->order, ret);
+                       ret = (ret < 0) ? ret : NETDEV_TX_OK;
+                       goto out;
+               }
+       }
+
+       /* Make sure some TX hook claimed the packet */
+       tx_pipe = p_info.tx_pipe;
+       if (!tx_pipe) {
+               dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
+               ret = -ENXIO;
+               goto out;
+       }
+
+       /* update descriptor */
+       if (p_info.psdata_len) {
+               u32 *psdata = p_info.psdata;
+
+               memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
+                       p_info.psdata_len);
+               set_words(psdata, p_info.psdata_len, psdata);
+               packet_info |=
+                       (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
+                       KNAV_DMA_DESC_PSLEN_SHIFT;
+       }
+
+       packet_info |= KNAV_DMA_DESC_HAS_EPIB |
+               ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
+               KNAV_DMA_DESC_RETQ_SHIFT) |
+               ((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) <<
+               KNAV_DMA_DESC_PSFLAG_SHIFT);
+
+       set_words(&packet_info, 1, &desc->packet_info);
+       set_words((u32 *)&skb, 1, &desc->pad[0]);
+
+       /* submit packet descriptor */
+       ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
+                                &dma_sz);
+       if (unlikely(ret)) {
+               dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
+               ret = -ENOMEM;
+               goto out;
+       }
+       skb_tx_timestamp(skb);
+       knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);
+
+out:
+       return ret;
+}
+
+/* Submit the packet */
+static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       int subqueue = skb_get_queue_mapping(skb);
+       struct knav_dma_desc *desc;
+       int desc_count, ret = 0;
+
+       if (unlikely(skb->len <= 0)) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
+               ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
+               if (ret < 0) {
+                       /* If we get here, the skb has already been dropped */
+                       dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
+                                ret);
+                       ndev->stats.tx_dropped++;
+                       return ret;
+               }
+               skb->len = NETCP_MIN_PACKET_SIZE;
+       }
+
+       desc = netcp_tx_map_skb(skb, netcp);
+       if (unlikely(!desc)) {
+               netif_stop_subqueue(ndev, subqueue);
+               ret = -ENOBUFS;
+               goto drop;
+       }
+
+       ret = netcp_tx_submit_skb(netcp, skb, desc);
+       if (ret)
+               goto drop;
+
+       ndev->trans_start = jiffies;
+
+       /* Check Tx pool count & stop subqueue if needed */
+       desc_count = knav_pool_count(netcp->tx_pool);
+       if (desc_count < netcp->tx_pause_threshold) {
+               dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
+               netif_stop_subqueue(ndev, subqueue);
+       }
+       return NETDEV_TX_OK;
+
+drop:
+       ndev->stats.tx_dropped++;
+       if (desc)
+               netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
+       dev_kfree_skb(skb);
+       return ret;
+}
+
+int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
+{
+       if (tx_pipe->dma_channel) {
+               knav_dma_close_channel(tx_pipe->dma_channel);
+               tx_pipe->dma_channel = NULL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_close);
+
+int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
+{
+       struct device *dev = tx_pipe->netcp_device->device;
+       struct knav_dma_cfg config;
+       int ret = 0;
+       u8 name[16];
+
+       memset(&config, 0, sizeof(config));
+       config.direction = DMA_MEM_TO_DEV;
+       config.u.tx.filt_einfo = false;
+       config.u.tx.filt_pswords = false;
+       config.u.tx.priority = DMA_PRIO_MED_L;
+
+       tx_pipe->dma_channel = knav_dma_open_channel(dev,
+                               tx_pipe->dma_chan_name, &config);
+       if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
+               dev_err(dev, "failed opening tx chan(%s)\n",
+                       tx_pipe->dma_chan_name);
+               goto err;
+       }
+
+       snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
+       tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
+                                            KNAV_QUEUE_SHARED);
+       if (IS_ERR(tx_pipe->dma_queue)) {
+               dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
+                       name, ret);
+               ret = PTR_ERR(tx_pipe->dma_queue);
+               goto err;
+       }
+
+       dev_dbg(dev, "opened tx pipe %s\n", name);
+       return 0;
+
+err:
+       if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
+               knav_dma_close_channel(tx_pipe->dma_channel);
+       tx_pipe->dma_channel = NULL;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_open);
+
+int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
+                     struct netcp_device *netcp_device,
+                     const char *dma_chan_name, unsigned int dma_queue_id)
+{
+       memset(tx_pipe, 0, sizeof(*tx_pipe));
+       tx_pipe->netcp_device = netcp_device;
+       tx_pipe->dma_chan_name = dma_chan_name;
+       tx_pipe->dma_queue_id = dma_queue_id;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_init);
+
+static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
+                                         const u8 *addr,
+                                         enum netcp_addr_type type)
+{
+       struct netcp_addr *naddr;
+
+       list_for_each_entry(naddr, &netcp->addr_list, node) {
+               if (naddr->type != type)
+                       continue;
+               if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
+                       continue;
+               return naddr;
+       }
+
+       return NULL;
+}
+
+static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
+                                        const u8 *addr,
+                                        enum netcp_addr_type type)
+{
+       struct netcp_addr *naddr;
+
+       naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
+       if (!naddr)
+               return NULL;
+
+       naddr->type = type;
+       naddr->flags = 0;
+       naddr->netcp = netcp;
+       if (addr)
+               ether_addr_copy(naddr->addr, addr);
+       else
+               memset(naddr->addr, 0, ETH_ALEN);
+       list_add_tail(&naddr->node, &netcp->addr_list);
+
+       return naddr;
+}
+
+static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
+{
+       list_del(&naddr->node);
+       devm_kfree(netcp->dev, naddr);
+}
+
+static void netcp_addr_clear_mark(struct netcp_intf *netcp)
+{
+       struct netcp_addr *naddr;
+
+       list_for_each_entry(naddr, &netcp->addr_list, node)
+               naddr->flags = 0;
+}
+
+static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
+                               enum netcp_addr_type type)
+{
+       struct netcp_addr *naddr;
+
+       naddr = netcp_addr_find(netcp, addr, type);
+       if (naddr) {
+               naddr->flags |= ADDR_VALID;
+               return;
+       }
+
+       naddr = netcp_addr_add(netcp, addr, type);
+       if (!WARN_ON(!naddr))
+               naddr->flags |= ADDR_NEW;
+}
+
+static void netcp_addr_sweep_del(struct netcp_intf *netcp)
+{
+       struct netcp_addr *naddr, *tmp;
+       struct netcp_intf_modpriv *priv;
+       struct netcp_module *module;
+       int error;
+
+       list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
+               if (naddr->flags & (ADDR_VALID | ADDR_NEW))
+                       continue;
+               dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
+                       naddr->addr, naddr->type);
+               mutex_lock(&netcp_modules_lock);
+               for_each_module(netcp, priv) {
+                       module = priv->netcp_module;
+                       if (!module->del_addr)
+                               continue;
+                       error = module->del_addr(priv->module_priv,
+                                                naddr);
+                       WARN_ON(error);
+               }
+               mutex_unlock(&netcp_modules_lock);
+               netcp_addr_del(netcp, naddr);
+       }
+}
+
+static void netcp_addr_sweep_add(struct netcp_intf *netcp)
+{
+       struct netcp_addr *naddr, *tmp;
+       struct netcp_intf_modpriv *priv;
+       struct netcp_module *module;
+       int error;
+
+       list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
+               if (!(naddr->flags & ADDR_NEW))
+                       continue;
+               dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
+                       naddr->addr, naddr->type);
+               mutex_lock(&netcp_modules_lock);
+               for_each_module(netcp, priv) {
+                       module = priv->netcp_module;
+                       if (!module->add_addr)
+                               continue;
+                       error = module->add_addr(priv->module_priv, naddr);
+                       WARN_ON(error);
+               }
+               mutex_unlock(&netcp_modules_lock);
+       }
+}
+
+static void netcp_set_rx_mode(struct net_device *ndev)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct netdev_hw_addr *ndev_addr;
+       bool promisc;
+
+       promisc = (ndev->flags & IFF_PROMISC ||
+                  ndev->flags & IFF_ALLMULTI ||
+                  netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
+
+       /* first clear all marks */
+       netcp_addr_clear_mark(netcp);
+
+       /* next add new entries, mark existing ones */
+       netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
+       for_each_dev_addr(ndev, ndev_addr)
+               netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
+       netdev_for_each_uc_addr(ndev_addr, ndev)
+               netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
+       netdev_for_each_mc_addr(ndev_addr, ndev)
+               netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);
+
+       if (promisc)
+               netcp_addr_add_mark(netcp, NULL, ADDR_ANY);
+
+       /* finally sweep and callout into modules */
+       netcp_addr_sweep_del(netcp);
+       netcp_addr_sweep_add(netcp);
+}
+
+static void netcp_free_navigator_resources(struct netcp_intf *netcp)
+{
+       int i;
+
+       if (netcp->rx_channel) {
+               knav_dma_close_channel(netcp->rx_channel);
+               netcp->rx_channel = NULL;
+       }
+
+       if (!IS_ERR_OR_NULL(netcp->rx_pool))
+               netcp_rxpool_free(netcp);
+
+       if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
+               knav_queue_close(netcp->rx_queue);
+               netcp->rx_queue = NULL;
+       }
+
+       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+            !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
+               knav_queue_close(netcp->rx_fdq[i]);
+               netcp->rx_fdq[i] = NULL;
+       }
+
+       if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
+               knav_queue_close(netcp->tx_compl_q);
+               netcp->tx_compl_q = NULL;
+       }
+
+       if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
+               knav_pool_destroy(netcp->tx_pool);
+               netcp->tx_pool = NULL;
+       }
+}
+
+static int netcp_setup_navigator_resources(struct net_device *ndev)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct knav_queue_notify_config notify_cfg;
+       struct knav_dma_cfg config;
+       u32 last_fdq = 0;
+       u8 name[16];
+       int ret;
+       int i;
+
+       /* Create Rx/Tx descriptor pools */
+       snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
+       netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
+                                               netcp->rx_pool_region_id);
+       if (IS_ERR_OR_NULL(netcp->rx_pool)) {
+               dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
+               ret = PTR_ERR(netcp->rx_pool);
+               goto fail;
+       }
+
+       snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
+       netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
+                                               netcp->tx_pool_region_id);
+       if (IS_ERR_OR_NULL(netcp->tx_pool)) {
+               dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
+               ret = PTR_ERR(netcp->tx_pool);
+               goto fail;
+       }
+
+       /* open Tx completion queue */
+       snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
+       netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
+       if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
+               ret = PTR_ERR(netcp->tx_compl_q);
+               goto fail;
+       }
+       netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);
+
+       /* Set notification for Tx completion */
+       notify_cfg.fn = netcp_tx_notify;
+       notify_cfg.fn_arg = netcp;
+       ret = knav_queue_device_control(netcp->tx_compl_q,
+                                       KNAV_QUEUE_SET_NOTIFIER,
+                                       (unsigned long)&notify_cfg);
+       if (ret)
+               goto fail;
+
+       knav_queue_disable_notify(netcp->tx_compl_q);
+
+       /* open Rx completion queue */
+       snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
+       netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
+       if (IS_ERR_OR_NULL(netcp->rx_queue)) {
+               ret = PTR_ERR(netcp->rx_queue);
+               goto fail;
+       }
+       netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);
+
+       /* Set notification for Rx completion */
+       notify_cfg.fn = netcp_rx_notify;
+       notify_cfg.fn_arg = netcp;
+       ret = knav_queue_device_control(netcp->rx_queue,
+                                       KNAV_QUEUE_SET_NOTIFIER,
+                                       (unsigned long)&notify_cfg);
+       if (ret)
+               goto fail;
+
+       knav_queue_disable_notify(netcp->rx_queue);
+
+       /* open Rx FDQs */
+       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+            netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+               snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
+               netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
+               if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
+                       ret = PTR_ERR(netcp->rx_fdq[i]);
+                       goto fail;
+               }
+       }
+
+       memset(&config, 0, sizeof(config));
+       config.direction                = DMA_DEV_TO_MEM;
+       config.u.rx.einfo_present       = true;
+       config.u.rx.psinfo_present      = true;
+       config.u.rx.err_mode            = DMA_DROP;
+       config.u.rx.desc_type           = DMA_DESC_HOST;
+       config.u.rx.psinfo_at_sop       = false;
+       config.u.rx.sop_offset          = NETCP_SOP_OFFSET;
+       config.u.rx.dst_q               = netcp->rx_queue_id;
+       config.u.rx.thresh              = DMA_THRESH_NONE;
+
+       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
+               if (netcp->rx_fdq[i])
+                       last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
+               config.u.rx.fdq[i] = last_fdq;
+       }
+
+       netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
+                                       netcp->dma_chan_name, &config);
+       if (IS_ERR_OR_NULL(netcp->rx_channel)) {
+               dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
+                       netcp->dma_chan_name);
+               goto fail;
+       }
+
+       dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
+       return 0;
+
+fail:
+       netcp_free_navigator_resources(netcp);
+       return ret;
+}
+
+/* Open the device */
+static int netcp_ndo_open(struct net_device *ndev)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct netcp_intf_modpriv *intf_modpriv;
+       struct netcp_module *module;
+       int ret;
+
+       netif_carrier_off(ndev);
+       ret = netcp_setup_navigator_resources(ndev);
+       if (ret) {
+               dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
+               goto fail;
+       }
+
+       mutex_lock(&netcp_modules_lock);
+       for_each_module(netcp, intf_modpriv) {
+               module = intf_modpriv->netcp_module;
+               if (module->open) {
+                       ret = module->open(intf_modpriv->module_priv, ndev);
+                       if (ret != 0) {
+                               dev_err(netcp->ndev_dev, "module open failed\n");
+                               goto fail_open;
+                       }
+               }
+       }
+       mutex_unlock(&netcp_modules_lock);
+
+       netcp_rxpool_refill(netcp);
+       napi_enable(&netcp->rx_napi);
+       napi_enable(&netcp->tx_napi);
+       knav_queue_enable_notify(netcp->tx_compl_q);
+       knav_queue_enable_notify(netcp->rx_queue);
+       netif_tx_wake_all_queues(ndev);
+       dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
+       return 0;
+
+fail_open:
+       for_each_module(netcp, intf_modpriv) {
+               module = intf_modpriv->netcp_module;
+               if (module->close)
+                       module->close(intf_modpriv->module_priv, ndev);
+       }
+       mutex_unlock(&netcp_modules_lock);
+
+fail:
+       netcp_free_navigator_resources(netcp);
+       return ret;
+}
+
+/* Close the device */
+static int netcp_ndo_stop(struct net_device *ndev)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct netcp_intf_modpriv *intf_modpriv;
+       struct netcp_module *module;
+       int err = 0;
+
+       netif_tx_stop_all_queues(ndev);
+       netif_carrier_off(ndev);
+       netcp_addr_clear_mark(netcp);
+       netcp_addr_sweep_del(netcp);
+       knav_queue_disable_notify(netcp->rx_queue);
+       knav_queue_disable_notify(netcp->tx_compl_q);
+       napi_disable(&netcp->rx_napi);
+       napi_disable(&netcp->tx_napi);
+
+       mutex_lock(&netcp_modules_lock);
+       for_each_module(netcp, intf_modpriv) {
+               module = intf_modpriv->netcp_module;
+               if (module->close) {
+                       err = module->close(intf_modpriv->module_priv, ndev);
+                       if (err != 0)
+                               dev_err(netcp->ndev_dev, "Close failed\n");
+               }
+       }
+       mutex_unlock(&netcp_modules_lock);
+
+       /* Recycle Rx descriptors from completion queue */
+       netcp_empty_rx_queue(netcp);
+
+       /* Recycle Tx descriptors from completion queue */
+       netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
+
+       if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
+               dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
+                       netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));
+
+       netcp_free_navigator_resources(netcp);
+       dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
+       return 0;
+}
+
+static int netcp_ndo_ioctl(struct net_device *ndev,
+                          struct ifreq *req, int cmd)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct netcp_intf_modpriv *intf_modpriv;
+       struct netcp_module *module;
+       int ret = -1, err = -EOPNOTSUPP;
+
+       if (!netif_running(ndev))
+               return -EINVAL;
+
+       mutex_lock(&netcp_modules_lock);
+       for_each_module(netcp, intf_modpriv) {
+               module = intf_modpriv->netcp_module;
+               if (!module->ioctl)
+                       continue;
+
+               err = module->ioctl(intf_modpriv->module_priv, req, cmd);
+               if ((err < 0) && (err != -EOPNOTSUPP)) {
+                       ret = err;
+                       goto out;
+               }
+               if (err == 0)
+                       ret = err;
+       }
+
+out:
+       mutex_unlock(&netcp_modules_lock);
+       return (ret == 0) ? 0 : err;
+}
+
+static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+
+       /* MTU < 68 is an error for IPv4 traffic */
+       if ((new_mtu < 68) ||
+           (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
+               dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
+               return -EINVAL;
+       }
+
+       ndev->mtu = new_mtu;
+       return 0;
+}
+
+static void netcp_ndo_tx_timeout(struct net_device *ndev)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       unsigned int descs = knav_pool_count(netcp->tx_pool);
+
+       dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
+       netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
+       ndev->trans_start = jiffies;
+       netif_tx_wake_all_queues(ndev);
+}
+
+static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct netcp_intf_modpriv *intf_modpriv;
+       struct netcp_module *module;
+       int err = 0;
+
+       dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
+
+       mutex_lock(&netcp_modules_lock);
+       for_each_module(netcp, intf_modpriv) {
+               module = intf_modpriv->netcp_module;
+               if ((module->add_vid) && (vid != 0)) {
+                       err = module->add_vid(intf_modpriv->module_priv, vid);
+                       if (err != 0) {
+                               dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
+                                       vid);
+                               break;
+                       }
+               }
+       }
+       mutex_unlock(&netcp_modules_lock);
+       return err;
+}
+
+static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct netcp_intf_modpriv *intf_modpriv;
+       struct netcp_module *module;
+       int err = 0;
+
+       dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
+
+       mutex_lock(&netcp_modules_lock);
+       for_each_module(netcp, intf_modpriv) {
+               module = intf_modpriv->netcp_module;
+               if (module->del_vid) {
+                       err = module->del_vid(intf_modpriv->module_priv, vid);
+                       if (err != 0) {
+                               dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
+                                       vid);
+                               break;
+                       }
+               }
+       }
+       mutex_unlock(&netcp_modules_lock);
+       return err;
+}
+
+static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
+                             void *accel_priv,
+                             select_queue_fallback_t fallback)
+{
+       return 0;
+}
+
+static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
+{
+       int i;
+
+       /* setup tc must be called under rtnl lock */
+       ASSERT_RTNL();
+
+       /* Sanity-check the number of traffic classes requested */
+       if ((dev->real_num_tx_queues <= 1) ||
+           (dev->real_num_tx_queues < num_tc))
+               return -EINVAL;
+
+       /* Configure traffic class to queue mappings */
+       if (num_tc) {
+               netdev_set_num_tc(dev, num_tc);
+               for (i = 0; i < num_tc; i++)
+                       netdev_set_tc_queue(dev, i, 1, i);
+       } else {
+               netdev_reset_tc(dev);
+       }
+
+       return 0;
+}
+
+static const struct net_device_ops netcp_netdev_ops = {
+       .ndo_open               = netcp_ndo_open,
+       .ndo_stop               = netcp_ndo_stop,
+       .ndo_start_xmit         = netcp_ndo_start_xmit,
+       .ndo_set_rx_mode        = netcp_set_rx_mode,
+       .ndo_do_ioctl           = netcp_ndo_ioctl,
+       .ndo_change_mtu         = netcp_ndo_change_mtu,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_vlan_rx_add_vid    = netcp_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = netcp_rx_kill_vid,
+       .ndo_tx_timeout         = netcp_ndo_tx_timeout,
+       .ndo_select_queue       = netcp_select_queue,
+       .ndo_setup_tc           = netcp_setup_tc,
+};
+
+static int netcp_create_interface(struct netcp_device *netcp_device,
+                                 struct device_node *node_interface)
+{
+       struct device *dev = netcp_device->device;
+       struct device_node *node = dev->of_node;
+       struct netcp_intf *netcp;
+       struct net_device *ndev;
+       resource_size_t size;
+       struct resource res;
+       void __iomem *efuse = NULL;
+       u32 efuse_mac = 0;
+       const void *mac_addr;
+       u8 efuse_mac_addr[6];
+       u32 temp[2];
+       int ret = 0;
+
+       ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
+       if (!ndev) {
+               dev_err(dev, "Error allocating netdev\n");
+               return -ENOMEM;
+       }
+
+       ndev->features |= NETIF_F_SG;
+       ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+       ndev->hw_features = ndev->features;
+       ndev->vlan_features |=  NETIF_F_SG;
+
+       netcp = netdev_priv(ndev);
+       spin_lock_init(&netcp->lock);
+       INIT_LIST_HEAD(&netcp->module_head);
+       INIT_LIST_HEAD(&netcp->txhook_list_head);
+       INIT_LIST_HEAD(&netcp->rxhook_list_head);
+       INIT_LIST_HEAD(&netcp->addr_list);
+       netcp->netcp_device = netcp_device;
+       netcp->dev = netcp_device->device;
+       netcp->ndev = ndev;
+       netcp->ndev_dev  = &ndev->dev;
+       netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
+       netcp->tx_pause_threshold = MAX_SKB_FRAGS;
+       netcp->tx_resume_threshold = netcp->tx_pause_threshold;
+       netcp->node_interface = node_interface;
+
+       ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
+       if (efuse_mac) {
+               if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
+                       dev_err(dev, "could not find efuse-mac reg resource\n");
+                       ret = -ENODEV;
+                       goto quit;
+               }
+               size = resource_size(&res);
+
+               if (!devm_request_mem_region(dev, res.start, size,
+                                            dev_name(dev))) {
+                       dev_err(dev, "could not reserve resource\n");
+                       ret = -ENOMEM;
+                       goto quit;
+               }
+
+               efuse = devm_ioremap_nocache(dev, res.start, size);
+               if (!efuse) {
+                       dev_err(dev, "could not map resource\n");
+                       devm_release_mem_region(dev, res.start, size);
+                       ret = -ENOMEM;
+                       goto quit;
+               }
+
+               emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+               if (is_valid_ether_addr(efuse_mac_addr))
+                       ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
+               else
+                       random_ether_addr(ndev->dev_addr);
+
+               devm_iounmap(dev, efuse);
+               devm_release_mem_region(dev, res.start, size);
+       } else {
+               mac_addr = of_get_mac_address(node_interface);
+               if (mac_addr)
+                       ether_addr_copy(ndev->dev_addr, mac_addr);
+               else
+                       random_ether_addr(ndev->dev_addr);
+       }
+
+       ret = of_property_read_string(node_interface, "rx-channel",
+                                     &netcp->dma_chan_name);
+       if (ret < 0) {
+               dev_err(dev, "missing \"rx-channel\" parameter\n");
+               ret = -ENODEV;
+               goto quit;
+       }
+
+       ret = of_property_read_u32(node_interface, "rx-queue",
+                                  &netcp->rx_queue_id);
+       if (ret < 0) {
+               dev_warn(dev, "missing \"rx-queue\" parameter\n");
+               netcp->rx_queue_id = KNAV_QUEUE_QPEND;
+       }
+
+       ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
+                                        netcp->rx_queue_depths,
+                                        KNAV_DMA_FDQ_PER_CHAN);
+       if (ret < 0) {
+               dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
+               netcp->rx_queue_depths[0] = 128;
+       }
+
+       ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
+                                        netcp->rx_buffer_sizes,
+                                        KNAV_DMA_FDQ_PER_CHAN);
+       if (ret) {
+               dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
+               netcp->rx_buffer_sizes[0] = 1536;
+       }
+
+       ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
+       if (ret < 0) {
+               dev_err(dev, "missing \"rx-pool\" parameter\n");
+               ret = -ENODEV;
+               goto quit;
+       }
+       netcp->rx_pool_size = temp[0];
+       netcp->rx_pool_region_id = temp[1];
+
+       ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
+       if (ret < 0) {
+               dev_err(dev, "missing \"tx-pool\" parameter\n");
+               ret = -ENODEV;
+               goto quit;
+       }
+       netcp->tx_pool_size = temp[0];
+       netcp->tx_pool_region_id = temp[1];
+
+       if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
+               dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n",
+                       MAX_SKB_FRAGS);
+               ret = -ENODEV;
+               goto quit;
+       }
+
+       ret = of_property_read_u32(node_interface, "tx-completion-queue",
+                                  &netcp->tx_compl_qid);
+       if (ret < 0) {
+               dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
+               netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
+       }
+
+       /* NAPI register */
+       netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
+       netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);
+
+       /* Register the network device */
+       ndev->dev_id            = 0;
+       ndev->watchdog_timeo    = NETCP_TX_TIMEOUT;
+       ndev->netdev_ops        = &netcp_netdev_ops;
+       SET_NETDEV_DEV(ndev, dev);
+
+       list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
+       return 0;
+
+quit:
+       free_netdev(ndev);
+       return ret;
+}
+
+static void netcp_delete_interface(struct netcp_device *netcp_device,
+                                  struct net_device *ndev)
+{
+       struct netcp_intf_modpriv *intf_modpriv, *tmp;
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct netcp_module *module;
+
+       dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
+               ndev->name);
+
+       /* Notify each of the modules that the interface is going away */
+       list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
+                                intf_list) {
+               module = intf_modpriv->netcp_module;
+               dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
+                       module->name);
+               if (module->release)
+                       module->release(intf_modpriv->module_priv);
+               list_del(&intf_modpriv->intf_list);
+               kfree(intf_modpriv);
+       }
+       WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
+            ndev->name);
+
+       list_del(&netcp->interface_list);
+
+       of_node_put(netcp->node_interface);
+       unregister_netdev(ndev);
+       netif_napi_del(&netcp->rx_napi);
+       free_netdev(ndev);
+}
+
+static int netcp_probe(struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       struct netcp_intf *netcp_intf, *netcp_tmp;
+       struct device_node *child, *interfaces;
+       struct netcp_device *netcp_device;
+       struct device *dev = &pdev->dev;
+       struct netcp_module *module;
+       int ret;
+
+       if (!node) {
+               dev_err(dev, "could not find device info\n");
+               return -ENODEV;
+       }
+
+       /* Allocate a new NETCP device instance */
+       netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
+       if (!netcp_device)
+               return -ENOMEM;
+
+       pm_runtime_enable(&pdev->dev);
+       ret = pm_runtime_get_sync(&pdev->dev);
+       if (ret < 0) {
+               dev_err(dev, "Failed to enable NETCP power-domain\n");
+               pm_runtime_disable(&pdev->dev);
+               return ret;
+       }
+
+       /* Initialize the NETCP device instance */
+       INIT_LIST_HEAD(&netcp_device->interface_head);
+       INIT_LIST_HEAD(&netcp_device->modpriv_head);
+       netcp_device->device = dev;
+       platform_set_drvdata(pdev, netcp_device);
+
+       /* create interfaces */
+       interfaces = of_get_child_by_name(node, "netcp-interfaces");
+       if (!interfaces) {
+               dev_err(dev, "could not find netcp-interfaces node\n");
+               ret = -ENODEV;
+               goto probe_quit;
+       }
+
+       for_each_available_child_of_node(interfaces, child) {
+               ret = netcp_create_interface(netcp_device, child);
+               if (ret) {
+                       dev_err(dev, "could not create interface(%s)\n",
+                               child->name);
+                       goto probe_quit_interface;
+               }
+       }
+
+       /* Add the device instance to the list */
+       list_add_tail(&netcp_device->device_list, &netcp_devices);
+
+       /* Probe & attach any modules already registered */
+       mutex_lock(&netcp_modules_lock);
+       for_each_netcp_module(module) {
+               ret = netcp_module_probe(netcp_device, module);
+               if (ret < 0)
+                       dev_err(dev, "module(%s) probe failed\n", module->name);
+       }
+       mutex_unlock(&netcp_modules_lock);
+       return 0;
+
+probe_quit_interface:
+       list_for_each_entry_safe(netcp_intf, netcp_tmp,
+                                &netcp_device->interface_head,
+                                interface_list) {
+               netcp_delete_interface(netcp_device, netcp_intf->ndev);
+       }
+
+probe_quit:
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+       platform_set_drvdata(pdev, NULL);
+       return ret;
+}
+
+static int netcp_remove(struct platform_device *pdev)
+{
+       struct netcp_device *netcp_device = platform_get_drvdata(pdev);
+       struct netcp_inst_modpriv *inst_modpriv, *tmp;
+       struct netcp_module *module;
+
+       list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
+                                inst_list) {
+               module = inst_modpriv->netcp_module;
+               dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
+               module->remove(netcp_device, inst_modpriv->module_priv);
+               list_del(&inst_modpriv->inst_list);
+               kfree(inst_modpriv);
+       }
+       WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
+            pdev->name);
+
+       devm_kfree(&pdev->dev, netcp_device);
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static struct of_device_id of_match[] = {
+       { .compatible = "ti,netcp-1.0", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver netcp_driver = {
+       .driver = {
+               .name           = "netcp-1.0",
+               .owner          = THIS_MODULE,
+               .of_match_table = of_match,
+       },
+       .probe = netcp_probe,
+       .remove = netcp_remove,
+};
+module_platform_driver(netcp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
new file mode 100644 (file)
index 0000000..84f5ce5
--- /dev/null
@@ -0,0 +1,2159 @@
+/*
+ * Keystone GBE and XGBE subsystem code
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:    Sandeep Nair <sandeep_n@ti.com>
+ *             Sandeep Paulraj <s-paulraj@ti.com>
+ *             Cyril Chemparathy <cyril@ti.com>
+ *             Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *             Wingman Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_address.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+
+#include "cpsw_ale.h"
+#include "netcp.h"
+
+#define NETCP_DRIVER_NAME              "TI KeyStone Ethernet Driver"
+#define NETCP_DRIVER_VERSION           "v1.0"
+
+#define GBE_IDENT(reg)                 ((reg >> 16) & 0xffff)
+#define GBE_MAJOR_VERSION(reg)         (reg >> 8 & 0x7)
+#define GBE_MINOR_VERSION(reg)         (reg & 0xff)
+#define GBE_RTL_VERSION(reg)           ((reg >> 11) & 0x1f)
+
+/* 1G Ethernet SS defines */
+#define GBE_MODULE_NAME                        "netcp-gbe"
+#define GBE_SS_VERSION_14              0x4ed21104
+
+#define GBE13_SGMII_MODULE_OFFSET      0x100
+#define GBE13_SGMII34_MODULE_OFFSET    0x400
+#define GBE13_SWITCH_MODULE_OFFSET     0x800
+#define GBE13_HOST_PORT_OFFSET         0x834
+#define GBE13_SLAVE_PORT_OFFSET                0x860
+#define GBE13_EMAC_OFFSET              0x900
+#define GBE13_SLAVE_PORT2_OFFSET       0xa00
+#define GBE13_HW_STATS_OFFSET          0xb00
+#define GBE13_ALE_OFFSET               0xe00
+#define GBE13_HOST_PORT_NUM            0
+#define GBE13_NUM_SLAVES               4
+#define GBE13_NUM_ALE_PORTS            (GBE13_NUM_SLAVES + 1)
+#define GBE13_NUM_ALE_ENTRIES          1024
+
+/* 10G Ethernet SS defines */
+#define XGBE_MODULE_NAME               "netcp-xgbe"
+#define XGBE_SS_VERSION_10             0x4ee42100
+
+#define XGBE_SERDES_REG_INDEX          1
+#define XGBE10_SGMII_MODULE_OFFSET     0x100
+#define XGBE10_SWITCH_MODULE_OFFSET    0x1000
+#define XGBE10_HOST_PORT_OFFSET                0x1034
+#define XGBE10_SLAVE_PORT_OFFSET       0x1064
+#define XGBE10_EMAC_OFFSET             0x1400
+#define XGBE10_ALE_OFFSET              0x1700
+#define XGBE10_HW_STATS_OFFSET         0x1800
+#define XGBE10_HOST_PORT_NUM           0
+#define XGBE10_NUM_SLAVES              2
+#define XGBE10_NUM_ALE_PORTS           (XGBE10_NUM_SLAVES + 1)
+#define XGBE10_NUM_ALE_ENTRIES         1024
+
+#define        GBE_TIMER_INTERVAL                      (HZ / 2)
+
+/* Soft reset register values */
+#define SOFT_RESET_MASK                                BIT(0)
+#define SOFT_RESET                             BIT(0)
+#define DEVICE_EMACSL_RESET_POLL_COUNT         100
+#define GMACSL_RET_WARN_RESET_INCOMPLETE       -2
+
+#define MACSL_RX_ENABLE_CSF                    BIT(23)
+#define MACSL_ENABLE_EXT_CTL                   BIT(18)
+#define MACSL_XGMII_ENABLE                     BIT(13)
+#define MACSL_XGIG_MODE                                BIT(8)
+#define MACSL_GIG_MODE                         BIT(7)
+#define MACSL_GMII_ENABLE                      BIT(5)
+#define MACSL_FULLDUPLEX                       BIT(0)
+
+#define GBE_CTL_P0_ENABLE                      BIT(2)
+#define GBE_REG_VAL_STAT_ENABLE_ALL            0xff
+#define XGBE_REG_VAL_STAT_ENABLE_ALL           0xf
+#define GBE_STATS_CD_SEL                       BIT(28)
+
+#define GBE_PORT_MASK(x)                       (BIT(x) - 1)
+#define GBE_MASK_NO_PORTS                      0
+
+#define GBE_DEF_1G_MAC_CONTROL                                 \
+               (MACSL_GIG_MODE | MACSL_GMII_ENABLE |           \
+                MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
+
+#define GBE_DEF_10G_MAC_CONTROL                                \
+               (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |         \
+                MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
+
+#define GBE_STATSA_MODULE                      0
+#define GBE_STATSB_MODULE                      1
+#define GBE_STATSC_MODULE                      2
+#define GBE_STATSD_MODULE                      3
+
+#define XGBE_STATS0_MODULE                     0
+#define XGBE_STATS1_MODULE                     1
+#define XGBE_STATS2_MODULE                     2
+
+#define MAX_SLAVES                             GBE13_NUM_SLAVES
+/* s: 0-based slave_port */
+#define SGMII_BASE(s) \
+       (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
+
+#define GBE_TX_QUEUE                           648
+#define        GBE_TXHOOK_ORDER                        0
+#define GBE_DEFAULT_ALE_AGEOUT                 30
+#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
+#define NETCP_LINK_STATE_INVALID               -1
+
+#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+               offsetof(struct gbe##_##rb, rn)
+#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+               offsetof(struct xgbe##_##rb, rn)
+#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
+
+struct xgbe_ss_regs {
+       u32     id_ver;
+       u32     synce_count;
+       u32     synce_mux;
+       u32     control;
+};
+
+struct xgbe_switch_regs {
+       u32     id_ver;
+       u32     control;
+       u32     emcontrol;
+       u32     stat_port_en;
+       u32     ptype;
+       u32     soft_idle;
+       u32     thru_rate;
+       u32     gap_thresh;
+       u32     tx_start_wds;
+       u32     flow_control;
+       u32     cppi_thresh;
+};
+
+struct xgbe_port_regs {
+       u32     blk_cnt;
+       u32     port_vlan;
+       u32     tx_pri_map;
+       u32     sa_lo;
+       u32     sa_hi;
+       u32     ts_ctl;
+       u32     ts_seq_ltype;
+       u32     ts_vlan;
+       u32     ts_ctl_ltype2;
+       u32     ts_ctl2;
+       u32     control;
+};
+
+struct xgbe_host_port_regs {
+       u32     blk_cnt;
+       u32     port_vlan;
+       u32     tx_pri_map;
+       u32     src_id;
+       u32     rx_pri_map;
+       u32     rx_maxlen;
+};
+
+struct xgbe_emac_regs {
+       u32     id_ver;
+       u32     mac_control;
+       u32     mac_status;
+       u32     soft_reset;
+       u32     rx_maxlen;
+       u32     __reserved_0;
+       u32     rx_pause;
+       u32     tx_pause;
+       u32     em_control;
+       u32     __reserved_1;
+       u32     tx_gap;
+       u32     rsvd[4];
+};
+
+struct xgbe_host_hw_stats {
+       u32     rx_good_frames;
+       u32     rx_broadcast_frames;
+       u32     rx_multicast_frames;
+       u32     __rsvd_0[3];
+       u32     rx_oversized_frames;
+       u32     __rsvd_1;
+       u32     rx_undersized_frames;
+       u32     __rsvd_2;
+       u32     overrun_type4;
+       u32     overrun_type5;
+       u32     rx_bytes;
+       u32     tx_good_frames;
+       u32     tx_broadcast_frames;
+       u32     tx_multicast_frames;
+       u32     __rsvd_3[9];
+       u32     tx_bytes;
+       u32     tx_64byte_frames;
+       u32     tx_65_to_127byte_frames;
+       u32     tx_128_to_255byte_frames;
+       u32     tx_256_to_511byte_frames;
+       u32     tx_512_to_1023byte_frames;
+       u32     tx_1024byte_frames;
+       u32     net_bytes;
+       u32     rx_sof_overruns;
+       u32     rx_mof_overruns;
+       u32     rx_dma_overruns;
+};
+
+struct xgbe_hw_stats {
+       u32     rx_good_frames;
+       u32     rx_broadcast_frames;
+       u32     rx_multicast_frames;
+       u32     rx_pause_frames;
+       u32     rx_crc_errors;
+       u32     rx_align_code_errors;
+       u32     rx_oversized_frames;
+       u32     rx_jabber_frames;
+       u32     rx_undersized_frames;
+       u32     rx_fragments;
+       u32     overrun_type4;
+       u32     overrun_type5;
+       u32     rx_bytes;
+       u32     tx_good_frames;
+       u32     tx_broadcast_frames;
+       u32     tx_multicast_frames;
+       u32     tx_pause_frames;
+       u32     tx_deferred_frames;
+       u32     tx_collision_frames;
+       u32     tx_single_coll_frames;
+       u32     tx_mult_coll_frames;
+       u32     tx_excessive_collisions;
+       u32     tx_late_collisions;
+       u32     tx_underrun;
+       u32     tx_carrier_sense_errors;
+       u32     tx_bytes;
+       u32     tx_64byte_frames;
+       u32     tx_65_to_127byte_frames;
+       u32     tx_128_to_255byte_frames;
+       u32     tx_256_to_511byte_frames;
+       u32     tx_512_to_1023byte_frames;
+       u32     tx_1024byte_frames;
+       u32     net_bytes;
+       u32     rx_sof_overruns;
+       u32     rx_mof_overruns;
+       u32     rx_dma_overruns;
+};
+
+#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
+
+struct gbe_ss_regs {
+       u32     id_ver;
+       u32     synce_count;
+       u32     synce_mux;
+};
+
+struct gbe_ss_regs_ofs {
+       u16     id_ver;
+       u16     control;
+};
+
+struct gbe_switch_regs {
+       u32     id_ver;
+       u32     control;
+       u32     soft_reset;
+       u32     stat_port_en;
+       u32     ptype;
+       u32     soft_idle;
+       u32     thru_rate;
+       u32     gap_thresh;
+       u32     tx_start_wds;
+       u32     flow_control;
+};
+
+struct gbe_switch_regs_ofs {
+       u16     id_ver;
+       u16     control;
+       u16     soft_reset;
+       u16     emcontrol;
+       u16     stat_port_en;
+       u16     ptype;
+       u16     flow_control;
+};
+
+struct gbe_port_regs {
+       u32     max_blks;
+       u32     blk_cnt;
+       u32     port_vlan;
+       u32     tx_pri_map;
+       u32     sa_lo;
+       u32     sa_hi;
+       u32     ts_ctl;
+       u32     ts_seq_ltype;
+       u32     ts_vlan;
+       u32     ts_ctl_ltype2;
+       u32     ts_ctl2;
+};
+
+struct gbe_port_regs_ofs {
+       u16     port_vlan;
+       u16     tx_pri_map;
+       u16     sa_lo;
+       u16     sa_hi;
+       u16     ts_ctl;
+       u16     ts_seq_ltype;
+       u16     ts_vlan;
+       u16     ts_ctl_ltype2;
+       u16     ts_ctl2;
+};
+
+struct gbe_host_port_regs {
+       u32     src_id;
+       u32     port_vlan;
+       u32     rx_pri_map;
+       u32     rx_maxlen;
+};
+
+struct gbe_host_port_regs_ofs {
+       u16     port_vlan;
+       u16     tx_pri_map;
+       u16     rx_maxlen;
+};
+
+struct gbe_emac_regs {
+       u32     id_ver;
+       u32     mac_control;
+       u32     mac_status;
+       u32     soft_reset;
+       u32     rx_maxlen;
+       u32     __reserved_0;
+       u32     rx_pause;
+       u32     tx_pause;
+       u32     __reserved_1;
+       u32     rx_pri_map;
+       u32     rsvd[6];
+};
+
+struct gbe_emac_regs_ofs {
+       u16     mac_control;
+       u16     soft_reset;
+       u16     rx_maxlen;
+};
+
+struct gbe_hw_stats {
+       u32     rx_good_frames;
+       u32     rx_broadcast_frames;
+       u32     rx_multicast_frames;
+       u32     rx_pause_frames;
+       u32     rx_crc_errors;
+       u32     rx_align_code_errors;
+       u32     rx_oversized_frames;
+       u32     rx_jabber_frames;
+       u32     rx_undersized_frames;
+       u32     rx_fragments;
+       u32     __pad_0[2];
+       u32     rx_bytes;
+       u32     tx_good_frames;
+       u32     tx_broadcast_frames;
+       u32     tx_multicast_frames;
+       u32     tx_pause_frames;
+       u32     tx_deferred_frames;
+       u32     tx_collision_frames;
+       u32     tx_single_coll_frames;
+       u32     tx_mult_coll_frames;
+       u32     tx_excessive_collisions;
+       u32     tx_late_collisions;
+       u32     tx_underrun;
+       u32     tx_carrier_sense_errors;
+       u32     tx_bytes;
+       u32     tx_64byte_frames;
+       u32     tx_65_to_127byte_frames;
+       u32     tx_128_to_255byte_frames;
+       u32     tx_256_to_511byte_frames;
+       u32     tx_512_to_1023byte_frames;
+       u32     tx_1024byte_frames;
+       u32     net_bytes;
+       u32     rx_sof_overruns;
+       u32     rx_mof_overruns;
+       u32     rx_dma_overruns;
+};
+
+#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
+#define GBE13_NUM_HW_STATS_MOD                 2
+#define XGBE10_NUM_HW_STATS_MOD                        3
+#define GBE_MAX_HW_STAT_MODS                   3
+#define GBE_HW_STATS_REG_MAP_SZ                        0x100
+
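+/* Each hardware statistics module occupies its own GBE_HW_STATS_REG_MAP_SZ
+ * (0x100 byte) window in the subsystem register space, so the per-module
+ * bases are derived as (illustrative, see the probe helpers below):
+ *
+ *	hw_stats_regs[i] = ss_regs + HW_STATS_OFFSET +
+ *			   (GBE_HW_STATS_REG_MAP_SZ * i);
+ */
+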
+struct gbe_slave {
+       void __iomem                    *port_regs;
+       void __iomem                    *emac_regs;
+       struct gbe_port_regs_ofs        port_regs_ofs;
+       struct gbe_emac_regs_ofs        emac_regs_ofs;
+       int                             slave_num; /* 0 based logical number */
+       int                             port_num;  /* actual port number */
+       atomic_t                        link_state;
+       bool                            open;
+       struct phy_device               *phy;
+       u32                             link_interface;
+       u32                             mac_control;
+       u8                              phy_port_t;
+       struct device_node              *phy_node;
+       struct list_head                slave_list;
+};
+
+struct gbe_priv {
+       struct device                   *dev;
+       struct netcp_device             *netcp_device;
+       struct timer_list               timer;
+       u32                             num_slaves;
+       u32                             ale_entries;
+       u32                             ale_ports;
+       bool                            enable_ale;
+       struct netcp_tx_pipe            tx_pipe;
+
+       int                             host_port;
+       u32                             rx_packet_max;
+       u32                             ss_version;
+
+       void __iomem                    *ss_regs;
+       void __iomem                    *switch_regs;
+       void __iomem                    *host_port_regs;
+       void __iomem                    *ale_reg;
+       void __iomem                    *sgmii_port_regs;
+       void __iomem                    *sgmii_port34_regs;
+       void __iomem                    *xgbe_serdes_regs;
+       void __iomem                    *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
+
+       struct gbe_ss_regs_ofs          ss_regs_ofs;
+       struct gbe_switch_regs_ofs      switch_regs_ofs;
+       struct gbe_host_port_regs_ofs   host_port_regs_ofs;
+
+       struct cpsw_ale                 *ale;
+       unsigned int                    tx_queue_id;
+       const char                      *dma_chan_name;
+
+       struct list_head                gbe_intf_head;
+       struct list_head                secondary_slaves;
+       struct net_device               *dummy_ndev;
+
+       u64                             *hw_stats;
+       const struct netcp_ethtool_stat *et_stats;
+       int                             num_et_stats;
+       /*  Lock for updating the hwstats */
+       spinlock_t                      hw_stats_lock;
+};
+
+struct gbe_intf {
+       struct net_device       *ndev;
+       struct device           *dev;
+       struct gbe_priv         *gbe_dev;
+       struct netcp_tx_pipe    tx_pipe;
+       struct gbe_slave        *slave;
+       struct list_head        gbe_intf_list;
+       unsigned long           active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+};
+
+static struct netcp_module gbe_module;
+static struct netcp_module xgbe_module;
+
+/* Statistic management */
+struct netcp_ethtool_stat {
+       char desc[ETH_GSTRING_LEN];
+       int type;
+       u32 size;
+       int offset;
+};
+
+#define GBE_STATSA_INFO(field)         "GBE_A:"#field, GBE_STATSA_MODULE,\
+                               FIELD_SIZEOF(struct gbe_hw_stats, field), \
+                               offsetof(struct gbe_hw_stats, field)
+
+#define GBE_STATSB_INFO(field)         "GBE_B:"#field, GBE_STATSB_MODULE,\
+                               FIELD_SIZEOF(struct gbe_hw_stats, field), \
+                               offsetof(struct gbe_hw_stats, field)
+
+#define GBE_STATSC_INFO(field)         "GBE_C:"#field, GBE_STATSC_MODULE,\
+                               FIELD_SIZEOF(struct gbe_hw_stats, field), \
+                               offsetof(struct gbe_hw_stats, field)
+
+#define GBE_STATSD_INFO(field)         "GBE_D:"#field, GBE_STATSD_MODULE,\
+                               FIELD_SIZEOF(struct gbe_hw_stats, field), \
+                               offsetof(struct gbe_hw_stats, field)
+
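+/* Each GBE_STATS?_INFO() use below fills the desc/type/size/offset members
+ * of one struct netcp_ethtool_stat. For example, {GBE_STATSA_INFO(rx_bytes)}
+ * expands to (illustrative):
+ *
+ *	{"GBE_A:rx_bytes", GBE_STATSA_MODULE,
+ *	 FIELD_SIZEOF(struct gbe_hw_stats, rx_bytes),
+ *	 offsetof(struct gbe_hw_stats, rx_bytes)}
+ */
+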
+static const struct netcp_ethtool_stat gbe13_et_stats[] = {
+       /* GBE module A */
+       {GBE_STATSA_INFO(rx_good_frames)},
+       {GBE_STATSA_INFO(rx_broadcast_frames)},
+       {GBE_STATSA_INFO(rx_multicast_frames)},
+       {GBE_STATSA_INFO(rx_pause_frames)},
+       {GBE_STATSA_INFO(rx_crc_errors)},
+       {GBE_STATSA_INFO(rx_align_code_errors)},
+       {GBE_STATSA_INFO(rx_oversized_frames)},
+       {GBE_STATSA_INFO(rx_jabber_frames)},
+       {GBE_STATSA_INFO(rx_undersized_frames)},
+       {GBE_STATSA_INFO(rx_fragments)},
+       {GBE_STATSA_INFO(rx_bytes)},
+       {GBE_STATSA_INFO(tx_good_frames)},
+       {GBE_STATSA_INFO(tx_broadcast_frames)},
+       {GBE_STATSA_INFO(tx_multicast_frames)},
+       {GBE_STATSA_INFO(tx_pause_frames)},
+       {GBE_STATSA_INFO(tx_deferred_frames)},
+       {GBE_STATSA_INFO(tx_collision_frames)},
+       {GBE_STATSA_INFO(tx_single_coll_frames)},
+       {GBE_STATSA_INFO(tx_mult_coll_frames)},
+       {GBE_STATSA_INFO(tx_excessive_collisions)},
+       {GBE_STATSA_INFO(tx_late_collisions)},
+       {GBE_STATSA_INFO(tx_underrun)},
+       {GBE_STATSA_INFO(tx_carrier_sense_errors)},
+       {GBE_STATSA_INFO(tx_bytes)},
+       {GBE_STATSA_INFO(tx_64byte_frames)},
+       {GBE_STATSA_INFO(tx_65_to_127byte_frames)},
+       {GBE_STATSA_INFO(tx_128_to_255byte_frames)},
+       {GBE_STATSA_INFO(tx_256_to_511byte_frames)},
+       {GBE_STATSA_INFO(tx_512_to_1023byte_frames)},
+       {GBE_STATSA_INFO(tx_1024byte_frames)},
+       {GBE_STATSA_INFO(net_bytes)},
+       {GBE_STATSA_INFO(rx_sof_overruns)},
+       {GBE_STATSA_INFO(rx_mof_overruns)},
+       {GBE_STATSA_INFO(rx_dma_overruns)},
+       /* GBE module B */
+       {GBE_STATSB_INFO(rx_good_frames)},
+       {GBE_STATSB_INFO(rx_broadcast_frames)},
+       {GBE_STATSB_INFO(rx_multicast_frames)},
+       {GBE_STATSB_INFO(rx_pause_frames)},
+       {GBE_STATSB_INFO(rx_crc_errors)},
+       {GBE_STATSB_INFO(rx_align_code_errors)},
+       {GBE_STATSB_INFO(rx_oversized_frames)},
+       {GBE_STATSB_INFO(rx_jabber_frames)},
+       {GBE_STATSB_INFO(rx_undersized_frames)},
+       {GBE_STATSB_INFO(rx_fragments)},
+       {GBE_STATSB_INFO(rx_bytes)},
+       {GBE_STATSB_INFO(tx_good_frames)},
+       {GBE_STATSB_INFO(tx_broadcast_frames)},
+       {GBE_STATSB_INFO(tx_multicast_frames)},
+       {GBE_STATSB_INFO(tx_pause_frames)},
+       {GBE_STATSB_INFO(tx_deferred_frames)},
+       {GBE_STATSB_INFO(tx_collision_frames)},
+       {GBE_STATSB_INFO(tx_single_coll_frames)},
+       {GBE_STATSB_INFO(tx_mult_coll_frames)},
+       {GBE_STATSB_INFO(tx_excessive_collisions)},
+       {GBE_STATSB_INFO(tx_late_collisions)},
+       {GBE_STATSB_INFO(tx_underrun)},
+       {GBE_STATSB_INFO(tx_carrier_sense_errors)},
+       {GBE_STATSB_INFO(tx_bytes)},
+       {GBE_STATSB_INFO(tx_64byte_frames)},
+       {GBE_STATSB_INFO(tx_65_to_127byte_frames)},
+       {GBE_STATSB_INFO(tx_128_to_255byte_frames)},
+       {GBE_STATSB_INFO(tx_256_to_511byte_frames)},
+       {GBE_STATSB_INFO(tx_512_to_1023byte_frames)},
+       {GBE_STATSB_INFO(tx_1024byte_frames)},
+       {GBE_STATSB_INFO(net_bytes)},
+       {GBE_STATSB_INFO(rx_sof_overruns)},
+       {GBE_STATSB_INFO(rx_mof_overruns)},
+       {GBE_STATSB_INFO(rx_dma_overruns)},
+       /* GBE module C */
+       {GBE_STATSC_INFO(rx_good_frames)},
+       {GBE_STATSC_INFO(rx_broadcast_frames)},
+       {GBE_STATSC_INFO(rx_multicast_frames)},
+       {GBE_STATSC_INFO(rx_pause_frames)},
+       {GBE_STATSC_INFO(rx_crc_errors)},
+       {GBE_STATSC_INFO(rx_align_code_errors)},
+       {GBE_STATSC_INFO(rx_oversized_frames)},
+       {GBE_STATSC_INFO(rx_jabber_frames)},
+       {GBE_STATSC_INFO(rx_undersized_frames)},
+       {GBE_STATSC_INFO(rx_fragments)},
+       {GBE_STATSC_INFO(rx_bytes)},
+       {GBE_STATSC_INFO(tx_good_frames)},
+       {GBE_STATSC_INFO(tx_broadcast_frames)},
+       {GBE_STATSC_INFO(tx_multicast_frames)},
+       {GBE_STATSC_INFO(tx_pause_frames)},
+       {GBE_STATSC_INFO(tx_deferred_frames)},
+       {GBE_STATSC_INFO(tx_collision_frames)},
+       {GBE_STATSC_INFO(tx_single_coll_frames)},
+       {GBE_STATSC_INFO(tx_mult_coll_frames)},
+       {GBE_STATSC_INFO(tx_excessive_collisions)},
+       {GBE_STATSC_INFO(tx_late_collisions)},
+       {GBE_STATSC_INFO(tx_underrun)},
+       {GBE_STATSC_INFO(tx_carrier_sense_errors)},
+       {GBE_STATSC_INFO(tx_bytes)},
+       {GBE_STATSC_INFO(tx_64byte_frames)},
+       {GBE_STATSC_INFO(tx_65_to_127byte_frames)},
+       {GBE_STATSC_INFO(tx_128_to_255byte_frames)},
+       {GBE_STATSC_INFO(tx_256_to_511byte_frames)},
+       {GBE_STATSC_INFO(tx_512_to_1023byte_frames)},
+       {GBE_STATSC_INFO(tx_1024byte_frames)},
+       {GBE_STATSC_INFO(net_bytes)},
+       {GBE_STATSC_INFO(rx_sof_overruns)},
+       {GBE_STATSC_INFO(rx_mof_overruns)},
+       {GBE_STATSC_INFO(rx_dma_overruns)},
+       /* GBE module D */
+       {GBE_STATSD_INFO(rx_good_frames)},
+       {GBE_STATSD_INFO(rx_broadcast_frames)},
+       {GBE_STATSD_INFO(rx_multicast_frames)},
+       {GBE_STATSD_INFO(rx_pause_frames)},
+       {GBE_STATSD_INFO(rx_crc_errors)},
+       {GBE_STATSD_INFO(rx_align_code_errors)},
+       {GBE_STATSD_INFO(rx_oversized_frames)},
+       {GBE_STATSD_INFO(rx_jabber_frames)},
+       {GBE_STATSD_INFO(rx_undersized_frames)},
+       {GBE_STATSD_INFO(rx_fragments)},
+       {GBE_STATSD_INFO(rx_bytes)},
+       {GBE_STATSD_INFO(tx_good_frames)},
+       {GBE_STATSD_INFO(tx_broadcast_frames)},
+       {GBE_STATSD_INFO(tx_multicast_frames)},
+       {GBE_STATSD_INFO(tx_pause_frames)},
+       {GBE_STATSD_INFO(tx_deferred_frames)},
+       {GBE_STATSD_INFO(tx_collision_frames)},
+       {GBE_STATSD_INFO(tx_single_coll_frames)},
+       {GBE_STATSD_INFO(tx_mult_coll_frames)},
+       {GBE_STATSD_INFO(tx_excessive_collisions)},
+       {GBE_STATSD_INFO(tx_late_collisions)},
+       {GBE_STATSD_INFO(tx_underrun)},
+       {GBE_STATSD_INFO(tx_carrier_sense_errors)},
+       {GBE_STATSD_INFO(tx_bytes)},
+       {GBE_STATSD_INFO(tx_64byte_frames)},
+       {GBE_STATSD_INFO(tx_65_to_127byte_frames)},
+       {GBE_STATSD_INFO(tx_128_to_255byte_frames)},
+       {GBE_STATSD_INFO(tx_256_to_511byte_frames)},
+       {GBE_STATSD_INFO(tx_512_to_1023byte_frames)},
+       {GBE_STATSD_INFO(tx_1024byte_frames)},
+       {GBE_STATSD_INFO(net_bytes)},
+       {GBE_STATSD_INFO(rx_sof_overruns)},
+       {GBE_STATSD_INFO(rx_mof_overruns)},
+       {GBE_STATSD_INFO(rx_dma_overruns)},
+};
+
+#define XGBE_STATS0_INFO(field)        "GBE_0:"#field, XGBE_STATS0_MODULE, \
+                               FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+                               offsetof(struct xgbe_hw_stats, field)
+
+#define XGBE_STATS1_INFO(field)        "GBE_1:"#field, XGBE_STATS1_MODULE, \
+                               FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+                               offsetof(struct xgbe_hw_stats, field)
+
+#define XGBE_STATS2_INFO(field)        "GBE_2:"#field, XGBE_STATS2_MODULE, \
+                               FIELD_SIZEOF(struct xgbe_hw_stats, field), \
+                               offsetof(struct xgbe_hw_stats, field)
+
+static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
+       /* XGBE module 0 */
+       {XGBE_STATS0_INFO(rx_good_frames)},
+       {XGBE_STATS0_INFO(rx_broadcast_frames)},
+       {XGBE_STATS0_INFO(rx_multicast_frames)},
+       {XGBE_STATS0_INFO(rx_oversized_frames)},
+       {XGBE_STATS0_INFO(rx_undersized_frames)},
+       {XGBE_STATS0_INFO(overrun_type4)},
+       {XGBE_STATS0_INFO(overrun_type5)},
+       {XGBE_STATS0_INFO(rx_bytes)},
+       {XGBE_STATS0_INFO(tx_good_frames)},
+       {XGBE_STATS0_INFO(tx_broadcast_frames)},
+       {XGBE_STATS0_INFO(tx_multicast_frames)},
+       {XGBE_STATS0_INFO(tx_bytes)},
+       {XGBE_STATS0_INFO(tx_64byte_frames)},
+       {XGBE_STATS0_INFO(tx_65_to_127byte_frames)},
+       {XGBE_STATS0_INFO(tx_128_to_255byte_frames)},
+       {XGBE_STATS0_INFO(tx_256_to_511byte_frames)},
+       {XGBE_STATS0_INFO(tx_512_to_1023byte_frames)},
+       {XGBE_STATS0_INFO(tx_1024byte_frames)},
+       {XGBE_STATS0_INFO(net_bytes)},
+       {XGBE_STATS0_INFO(rx_sof_overruns)},
+       {XGBE_STATS0_INFO(rx_mof_overruns)},
+       {XGBE_STATS0_INFO(rx_dma_overruns)},
+       /* XGBE module 1 */
+       {XGBE_STATS1_INFO(rx_good_frames)},
+       {XGBE_STATS1_INFO(rx_broadcast_frames)},
+       {XGBE_STATS1_INFO(rx_multicast_frames)},
+       {XGBE_STATS1_INFO(rx_pause_frames)},
+       {XGBE_STATS1_INFO(rx_crc_errors)},
+       {XGBE_STATS1_INFO(rx_align_code_errors)},
+       {XGBE_STATS1_INFO(rx_oversized_frames)},
+       {XGBE_STATS1_INFO(rx_jabber_frames)},
+       {XGBE_STATS1_INFO(rx_undersized_frames)},
+       {XGBE_STATS1_INFO(rx_fragments)},
+       {XGBE_STATS1_INFO(overrun_type4)},
+       {XGBE_STATS1_INFO(overrun_type5)},
+       {XGBE_STATS1_INFO(rx_bytes)},
+       {XGBE_STATS1_INFO(tx_good_frames)},
+       {XGBE_STATS1_INFO(tx_broadcast_frames)},
+       {XGBE_STATS1_INFO(tx_multicast_frames)},
+       {XGBE_STATS1_INFO(tx_pause_frames)},
+       {XGBE_STATS1_INFO(tx_deferred_frames)},
+       {XGBE_STATS1_INFO(tx_collision_frames)},
+       {XGBE_STATS1_INFO(tx_single_coll_frames)},
+       {XGBE_STATS1_INFO(tx_mult_coll_frames)},
+       {XGBE_STATS1_INFO(tx_excessive_collisions)},
+       {XGBE_STATS1_INFO(tx_late_collisions)},
+       {XGBE_STATS1_INFO(tx_underrun)},
+       {XGBE_STATS1_INFO(tx_carrier_sense_errors)},
+       {XGBE_STATS1_INFO(tx_bytes)},
+       {XGBE_STATS1_INFO(tx_64byte_frames)},
+       {XGBE_STATS1_INFO(tx_65_to_127byte_frames)},
+       {XGBE_STATS1_INFO(tx_128_to_255byte_frames)},
+       {XGBE_STATS1_INFO(tx_256_to_511byte_frames)},
+       {XGBE_STATS1_INFO(tx_512_to_1023byte_frames)},
+       {XGBE_STATS1_INFO(tx_1024byte_frames)},
+       {XGBE_STATS1_INFO(net_bytes)},
+       {XGBE_STATS1_INFO(rx_sof_overruns)},
+       {XGBE_STATS1_INFO(rx_mof_overruns)},
+       {XGBE_STATS1_INFO(rx_dma_overruns)},
+       /* XGBE module 2 */
+       {XGBE_STATS2_INFO(rx_good_frames)},
+       {XGBE_STATS2_INFO(rx_broadcast_frames)},
+       {XGBE_STATS2_INFO(rx_multicast_frames)},
+       {XGBE_STATS2_INFO(rx_pause_frames)},
+       {XGBE_STATS2_INFO(rx_crc_errors)},
+       {XGBE_STATS2_INFO(rx_align_code_errors)},
+       {XGBE_STATS2_INFO(rx_oversized_frames)},
+       {XGBE_STATS2_INFO(rx_jabber_frames)},
+       {XGBE_STATS2_INFO(rx_undersized_frames)},
+       {XGBE_STATS2_INFO(rx_fragments)},
+       {XGBE_STATS2_INFO(overrun_type4)},
+       {XGBE_STATS2_INFO(overrun_type5)},
+       {XGBE_STATS2_INFO(rx_bytes)},
+       {XGBE_STATS2_INFO(tx_good_frames)},
+       {XGBE_STATS2_INFO(tx_broadcast_frames)},
+       {XGBE_STATS2_INFO(tx_multicast_frames)},
+       {XGBE_STATS2_INFO(tx_pause_frames)},
+       {XGBE_STATS2_INFO(tx_deferred_frames)},
+       {XGBE_STATS2_INFO(tx_collision_frames)},
+       {XGBE_STATS2_INFO(tx_single_coll_frames)},
+       {XGBE_STATS2_INFO(tx_mult_coll_frames)},
+       {XGBE_STATS2_INFO(tx_excessive_collisions)},
+       {XGBE_STATS2_INFO(tx_late_collisions)},
+       {XGBE_STATS2_INFO(tx_underrun)},
+       {XGBE_STATS2_INFO(tx_carrier_sense_errors)},
+       {XGBE_STATS2_INFO(tx_bytes)},
+       {XGBE_STATS2_INFO(tx_64byte_frames)},
+       {XGBE_STATS2_INFO(tx_65_to_127byte_frames)},
+       {XGBE_STATS2_INFO(tx_128_to_255byte_frames)},
+       {XGBE_STATS2_INFO(tx_256_to_511byte_frames)},
+       {XGBE_STATS2_INFO(tx_512_to_1023byte_frames)},
+       {XGBE_STATS2_INFO(tx_1024byte_frames)},
+       {XGBE_STATS2_INFO(net_bytes)},
+       {XGBE_STATS2_INFO(rx_sof_overruns)},
+       {XGBE_STATS2_INFO(rx_mof_overruns)},
+       {XGBE_STATS2_INFO(rx_dma_overruns)},
+};
+
+#define for_each_intf(i, priv) \
+       list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
+
+#define for_each_sec_slave(slave, priv) \
+       list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
+
+#define first_sec_slave(priv)                                  \
+       list_first_entry(&priv->secondary_slaves, \
+                       struct gbe_slave, slave_list)
+
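+/* Note that list_first_entry() computes an entry pointer even when the list
+ * is empty and thus never returns NULL, so callers of first_sec_slave() must
+ * check list_empty() on &priv->secondary_slaves beforehand (as
+ * free_secondary_ports() below does).
+ */
+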
+static void keystone_get_drvinfo(struct net_device *ndev,
+                                struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
+       strlcpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
+}
+
+static u32 keystone_get_msglevel(struct net_device *ndev)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+
+       return netcp->msg_enable;
+}
+
+static void keystone_set_msglevel(struct net_device *ndev, u32 value)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+
+       netcp->msg_enable = value;
+}
+
+static void keystone_get_stat_strings(struct net_device *ndev,
+                                     uint32_t stringset, uint8_t *data)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct gbe_intf *gbe_intf;
+       struct gbe_priv *gbe_dev;
+       int i;
+
+       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       if (!gbe_intf)
+               return;
+       gbe_dev = gbe_intf->gbe_dev;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < gbe_dev->num_et_stats; i++) {
+                       memcpy(data, gbe_dev->et_stats[i].desc,
+                              ETH_GSTRING_LEN);
+                       data += ETH_GSTRING_LEN;
+               }
+               break;
+       case ETH_SS_TEST:
+               break;
+       }
+}
+
+static int keystone_get_sset_count(struct net_device *ndev, int stringset)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct gbe_intf *gbe_intf;
+       struct gbe_priv *gbe_dev;
+
+       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       if (!gbe_intf)
+               return -EINVAL;
+       gbe_dev = gbe_intf->gbe_dev;
+
+       switch (stringset) {
+       case ETH_SS_TEST:
+               return 0;
+       case ETH_SS_STATS:
+               return gbe_dev->num_et_stats;
+       default:
+               return -EINVAL;
+       }
+}
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+       void __iomem *base = NULL;
+       u32  __iomem *p;
+       u32 tmp = 0;
+       int i;
+
+       for (i = 0; i < gbe_dev->num_et_stats; i++) {
+               base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
+               p = base + gbe_dev->et_stats[i].offset;
+               tmp = readl(p);
+               gbe_dev->hw_stats[i] += tmp;
+               if (data)
+                       data[i] = gbe_dev->hw_stats[i];
+               /* write-to-decrement:
+                * new register value = old register value - write value
+                */
+               writel(tmp, p);
+       }
+}
+
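+/* Worked example of the write-to-decrement scheme above (illustrative):
+ * if a counter register reads 5, we add 5 to the 64-bit soft counter and
+ * write 5 back, leaving the register holding only the hits that arrived
+ * after the read. The 32-bit hardware counters therefore cannot wrap as
+ * long as they are drained often enough (see netcp_ethss_timer()).
+ */
+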
+static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+       void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
+       void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
+       u64 *hw_stats = &gbe_dev->hw_stats[0];
+       void __iomem *base = NULL;
+       u32  __iomem *p;
+       u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
+       int i, j, pair;
+
+       for (pair = 0; pair < 2; pair++) {
+               val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+
+               if (pair == 0)
+                       val &= ~GBE_STATS_CD_SEL;
+               else
+                       val |= GBE_STATS_CD_SEL;
+
+               /* make the stat modules visible */
+               writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+
+               for (i = 0; i < pair_size; i++) {
+                       j = pair * pair_size + i;
+                       switch (gbe_dev->et_stats[j].type) {
+                       case GBE_STATSA_MODULE:
+                       case GBE_STATSC_MODULE:
+                               base = gbe_statsa;
+                               break;
+                       case GBE_STATSB_MODULE:
+                       case GBE_STATSD_MODULE:
+                               base = gbe_statsb;
+                               break;
+                       }
+
+                       p = base + gbe_dev->et_stats[j].offset;
+                       tmp = readl(p);
+                       hw_stats[j] += tmp;
+                       if (data)
+                               data[j] = hw_stats[j];
+                       /* write-to-decrement:
+                        * new register value = old register value - write value
+                        */
+                       writel(tmp, p);
+               }
+       }
+}
+
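+/* On version 1.4 hardware only two of the four stats modules are mapped in
+ * at any time; the GBE_STATS_CD_SEL bit in stat_port_en appears to select
+ * whether the two windows expose modules A/B or C/D, which is why the
+ * counters above are drained in two passes.
+ */
+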
+static void keystone_get_ethtool_stats(struct net_device *ndev,
+                                      struct ethtool_stats *stats,
+                                      uint64_t *data)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct gbe_intf *gbe_intf;
+       struct gbe_priv *gbe_dev;
+
+       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       if (!gbe_intf)
+               return;
+
+       gbe_dev = gbe_intf->gbe_dev;
+       spin_lock_bh(&gbe_dev->hw_stats_lock);
+       if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+               gbe_update_stats_ver14(gbe_dev, data);
+       else
+               gbe_update_stats(gbe_dev, data);
+       spin_unlock_bh(&gbe_dev->hw_stats_lock);
+}
+
+static int keystone_get_settings(struct net_device *ndev,
+                                struct ethtool_cmd *cmd)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct phy_device *phy = ndev->phydev;
+       struct gbe_intf *gbe_intf;
+       int ret;
+
+       if (!phy)
+               return -EINVAL;
+
+       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       if (!gbe_intf)
+               return -EINVAL;
+
+       if (!gbe_intf->slave)
+               return -EINVAL;
+
+       ret = phy_ethtool_gset(phy, cmd);
+       if (!ret)
+               cmd->port = gbe_intf->slave->phy_port_t;
+
+       return ret;
+}
+
+static int keystone_set_settings(struct net_device *ndev,
+                                struct ethtool_cmd *cmd)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct phy_device *phy = ndev->phydev;
+       struct gbe_intf *gbe_intf;
+       u32 features = cmd->advertising & cmd->supported;
+
+       if (!phy)
+               return -EINVAL;
+
+       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       if (!gbe_intf)
+               return -EINVAL;
+
+       if (!gbe_intf->slave)
+               return -EINVAL;
+
+       if (cmd->port != gbe_intf->slave->phy_port_t) {
+               if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
+                       return -EINVAL;
+
+               if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
+                       return -EINVAL;
+
+               if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
+                       return -EINVAL;
+
+               if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
+                       return -EINVAL;
+
+               if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
+                       return -EINVAL;
+       }
+
+       gbe_intf->slave->phy_port_t = cmd->port;
+       return phy_ethtool_sset(phy, cmd);
+}
+
+static const struct ethtool_ops keystone_ethtool_ops = {
+       .get_drvinfo            = keystone_get_drvinfo,
+       .get_link               = ethtool_op_get_link,
+       .get_msglevel           = keystone_get_msglevel,
+       .set_msglevel           = keystone_set_msglevel,
+       .get_strings            = keystone_get_stat_strings,
+       .get_sset_count         = keystone_get_sset_count,
+       .get_ethtool_stats      = keystone_get_ethtool_stats,
+       .get_settings           = keystone_get_settings,
+       .set_settings           = keystone_set_settings,
+};
+
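+/* This ops table is presumably installed as ndev->ethtool_ops when the
+ * NetCP core registers each interface; that hookup happens outside this
+ * file.
+ */
+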
+#define mac_hi(mac)    (((mac)[0] << 0) | ((mac)[1] << 8) |    \
+                        ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac)    (((mac)[4] << 0) | ((mac)[5] << 8))
+
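+/* Example (illustrative): for dev_addr 00:11:22:33:44:55, mac_hi() yields
+ * 0x33221100 and mac_lo() yields 0x5544, the byte order expected by the
+ * sa_hi/sa_lo port registers written below.
+ */
+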
+static void gbe_set_slave_mac(struct gbe_slave *slave,
+                             struct gbe_intf *gbe_intf)
+{
+       struct net_device *ndev = gbe_intf->ndev;
+
+       writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
+       writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
+}
+
+static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
+{
+       if (priv->host_port == 0)
+               return slave_num + 1;
+
+       return slave_num;
+}
+
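+/* Example (illustrative): when the host port is 0, logical slaves 0..N-1
+ * map to switch ports 1..N (port 0 being the host port); with a non-zero
+ * host port the slave numbering is used as-is.
+ */
+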
+static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
+                                         struct net_device *ndev,
+                                         struct gbe_slave *slave,
+                                         int up)
+{
+       struct phy_device *phy = slave->phy;
+       u32 mac_control = 0;
+
+       if (up) {
+               mac_control = slave->mac_control;
+               if (phy && (phy->speed == SPEED_1000)) {
+                       mac_control |= MACSL_GIG_MODE;
+                       mac_control &= ~MACSL_XGIG_MODE;
+               } else if (phy && (phy->speed == SPEED_10000)) {
+                       mac_control |= MACSL_XGIG_MODE;
+                       mac_control &= ~MACSL_GIG_MODE;
+               }
+
+               writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
+                                                mac_control));
+
+               cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+                                    ALE_PORT_STATE,
+                                    ALE_PORT_STATE_FORWARD);
+
+               if (ndev && slave->open)
+                       netif_carrier_on(ndev);
+       } else {
+               writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
+                                                mac_control));
+               cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+                                    ALE_PORT_STATE,
+                                    ALE_PORT_STATE_DISABLE);
+               if (ndev)
+                       netif_carrier_off(ndev);
+       }
+
+       if (phy)
+               phy_print_status(phy);
+}
+
+static bool gbe_phy_link_status(struct gbe_slave *slave)
+{
+       return !slave->phy || slave->phy->link;
+}
+
+static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
+                                         struct gbe_slave *slave,
+                                         struct net_device *ndev)
+{
+       int sp = slave->slave_num;
+       int phy_link_state, sgmii_link_state = 1, link_state;
+
+       if (!slave->open)
+               return;
+
+       if (!SLAVE_LINK_IS_XGMII(slave))
+               sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp),
+                                                            sp);
+       phy_link_state = gbe_phy_link_status(slave);
+       link_state = phy_link_state & sgmii_link_state;
+
+       if (atomic_xchg(&slave->link_state, link_state) != link_state)
+               netcp_ethss_link_state_action(gbe_dev, ndev, slave,
+                                             link_state);
+}
+
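+/* atomic_xchg() returns the previous link_state, so the link state action
+ * above runs only on an actual up/down transition rather than on every
+ * poll of this function.
+ */
+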
+static void xgbe_adjust_link(struct net_device *ndev)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct gbe_intf *gbe_intf;
+
+       gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+       if (!gbe_intf)
+               return;
+
+       netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
+                                     ndev);
+}
+
+static void gbe_adjust_link(struct net_device *ndev)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct gbe_intf *gbe_intf;
+
+       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       if (!gbe_intf)
+               return;
+
+       netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
+                                     ndev);
+}
+
+static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
+{
+       struct gbe_priv *gbe_dev = netdev_priv(ndev);
+       struct gbe_slave *slave;
+
+       for_each_sec_slave(slave, gbe_dev)
+               netcp_ethss_update_link_state(gbe_dev, slave, NULL);
+}
+
+/* Reset EMAC
+ * Soft reset is set and polled until clear, or until a timeout occurs
+ */
+static int gbe_port_reset(struct gbe_slave *slave)
+{
+       u32 i, v;
+
+       /* Set the soft reset bit */
+       writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
+
+       /* Wait for the bit to clear */
+       for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
+               v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
+               if ((v & SOFT_RESET_MASK) != SOFT_RESET)
+                       return 0;
+       }
+
+       /* Timeout on the reset */
+       return GMACSL_RET_WARN_RESET_INCOMPLETE;
+}
+
+/* Configure EMAC */
+static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
+                           int max_rx_len)
+{
+       u32 xgmii_mode;
+
+       if (max_rx_len > NETCP_MAX_FRAME_SIZE)
+               max_rx_len = NETCP_MAX_FRAME_SIZE;
+
+       /* Enable correct MII mode at SS level */
+       if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
+           (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
+               xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
+               xgmii_mode |= (1 << slave->slave_num);
+               writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
+       }
+
+       writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen));
+       writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
+}
+
+static void gbe_slave_stop(struct gbe_intf *intf)
+{
+       struct gbe_priv *gbe_dev = intf->gbe_dev;
+       struct gbe_slave *slave = intf->slave;
+
+       gbe_port_reset(slave);
+       /* Disable forwarding */
+       cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+                            ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+       cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
+                          1 << slave->port_num, 0, 0);
+
+       if (!slave->phy)
+               return;
+
+       phy_stop(slave->phy);
+       phy_disconnect(slave->phy);
+       slave->phy = NULL;
+}
+
+static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
+{
+       void __iomem *sgmii_port_regs;
+
+       sgmii_port_regs = priv->sgmii_port_regs;
+       if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
+               sgmii_port_regs = priv->sgmii_port34_regs;
+
+       if (!SLAVE_LINK_IS_XGMII(slave)) {
+               netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
+               netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
+                                  slave->link_interface);
+       }
+}
+
+static int gbe_slave_open(struct gbe_intf *gbe_intf)
+{
+       struct gbe_priv *priv = gbe_intf->gbe_dev;
+       struct gbe_slave *slave = gbe_intf->slave;
+       phy_interface_t phy_mode;
+       bool has_phy = false;
+
+       void (*hndlr)(struct net_device *) = gbe_adjust_link;
+
+       gbe_sgmii_config(priv, slave);
+       gbe_port_reset(slave);
+       gbe_port_config(priv, slave, priv->rx_packet_max);
+       gbe_set_slave_mac(slave, gbe_intf);
+       /* enable forwarding */
+       cpsw_ale_control_set(priv->ale, slave->port_num,
+                            ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+       cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
+                          1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
+
+       if (slave->link_interface == SGMII_LINK_MAC_PHY) {
+               has_phy = true;
+               phy_mode = PHY_INTERFACE_MODE_SGMII;
+               slave->phy_port_t = PORT_MII;
+       } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
+               has_phy = true;
+               phy_mode = PHY_INTERFACE_MODE_NA;
+               slave->phy_port_t = PORT_FIBRE;
+       }
+
+       if (has_phy) {
+               if (priv->ss_version == XGBE_SS_VERSION_10)
+                       hndlr = xgbe_adjust_link;
+
+               slave->phy = of_phy_connect(gbe_intf->ndev,
+                                           slave->phy_node,
+                                           hndlr, 0,
+                                           phy_mode);
+               if (!slave->phy) {
+                       dev_err(priv->dev, "phy not found on slave %d\n",
+                               slave->slave_num);
+                       return -ENODEV;
+               }
+               dev_dbg(priv->dev, "phy found: id is %s\n",
+                       dev_name(&slave->phy->dev));
+               phy_start(slave->phy);
+               phy_read_status(slave->phy);
+       }
+       return 0;
+}
+
+static void gbe_init_host_port(struct gbe_priv *priv)
+{
+       int bypass_en = 1;
+
+       /* Max length register */
+       writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
+                                                 rx_maxlen));
+
+       cpsw_ale_start(priv->ale);
+
+       if (priv->enable_ale)
+               bypass_en = 0;
+
+       cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
+
+       cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
+
+       cpsw_ale_control_set(priv->ale, priv->host_port,
+                            ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+       cpsw_ale_control_set(priv->ale, 0,
+                            ALE_PORT_UNKNOWN_VLAN_MEMBER,
+                            GBE_PORT_MASK(priv->ale_ports));
+
+       cpsw_ale_control_set(priv->ale, 0,
+                            ALE_PORT_UNKNOWN_MCAST_FLOOD,
+                            GBE_PORT_MASK(priv->ale_ports - 1));
+
+       cpsw_ale_control_set(priv->ale, 0,
+                            ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
+                            GBE_PORT_MASK(priv->ale_ports));
+
+       cpsw_ale_control_set(priv->ale, 0,
+                            ALE_PORT_UNTAGGED_EGRESS,
+                            GBE_PORT_MASK(priv->ale_ports));
+}
+
+static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+       struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+       u16 vlan_id;
+
+       cpsw_ale_add_mcast(gbe_dev->ale, addr,
+                          GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
+                          ALE_MCAST_FWD_2);
+       for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+               cpsw_ale_add_mcast(gbe_dev->ale, addr,
+                                  GBE_PORT_MASK(gbe_dev->ale_ports),
+                                  ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
+       }
+}
+
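+/* ALE lookups include the VLAN id when an entry is added with the ALE_VLAN
+ * flag, so the multicast address above is installed once for untagged
+ * traffic and once per active VLAN.
+ */
+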
+static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+       struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+       u16 vlan_id;
+
+       cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
+
+       for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
+               cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
+                                  ALE_VLAN, vlan_id);
+}
+
+static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+       struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+       u16 vlan_id;
+
+       cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
+
+       for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+               cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
+       }
+}
+
+static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+       struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+       u16 vlan_id;
+
+       cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
+
+       for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+               cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
+                                  ALE_VLAN, vlan_id);
+       }
+}
+
+static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
+{
+       struct gbe_intf *gbe_intf = intf_priv;
+       struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+       dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
+               naddr->addr, naddr->type);
+
+       switch (naddr->type) {
+       case ADDR_MCAST:
+       case ADDR_BCAST:
+               gbe_add_mcast_addr(gbe_intf, naddr->addr);
+               break;
+       case ADDR_UCAST:
+       case ADDR_DEV:
+               gbe_add_ucast_addr(gbe_intf, naddr->addr);
+               break;
+       case ADDR_ANY:
+               /* nothing to do for promiscuous */
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
+{
+       struct gbe_intf *gbe_intf = intf_priv;
+       struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+       dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
+               naddr->addr, naddr->type);
+
+       switch (naddr->type) {
+       case ADDR_MCAST:
+       case ADDR_BCAST:
+               gbe_del_mcast_addr(gbe_intf, naddr->addr);
+               break;
+       case ADDR_UCAST:
+       case ADDR_DEV:
+               gbe_del_ucast_addr(gbe_intf, naddr->addr);
+               break;
+       case ADDR_ANY:
+               /* nothing to do for promiscuous */
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gbe_add_vid(void *intf_priv, int vid)
+{
+       struct gbe_intf *gbe_intf = intf_priv;
+       struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+       set_bit(vid, gbe_intf->active_vlans);
+
+       cpsw_ale_add_vlan(gbe_dev->ale, vid,
+                         GBE_PORT_MASK(gbe_dev->ale_ports),
+                         GBE_MASK_NO_PORTS,
+                         GBE_PORT_MASK(gbe_dev->ale_ports),
+                         GBE_PORT_MASK(gbe_dev->ale_ports - 1));
+
+       return 0;
+}
+
+static int gbe_del_vid(void *intf_priv, int vid)
+{
+       struct gbe_intf *gbe_intf = intf_priv;
+       struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+       cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
+       clear_bit(vid, gbe_intf->active_vlans);
+       return 0;
+}
+
+static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
+{
+       struct gbe_intf *gbe_intf = intf_priv;
+       struct phy_device *phy = gbe_intf->slave->phy;
+       int ret = -EOPNOTSUPP;
+
+       if (phy)
+               ret = phy_mii_ioctl(phy, req, cmd);
+
+       return ret;
+}
+
+static void netcp_ethss_timer(unsigned long arg)
+{
+       struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
+       struct gbe_intf *gbe_intf;
+       struct gbe_slave *slave;
+
+       /* Check & update SGMII link state of interfaces */
+       for_each_intf(gbe_intf, gbe_dev) {
+               if (!gbe_intf->slave->open)
+                       continue;
+               netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
+                                             gbe_intf->ndev);
+       }
+
+       /* Check & update SGMII link state of secondary ports */
+       for_each_sec_slave(slave, gbe_dev) {
+               netcp_ethss_update_link_state(gbe_dev, slave, NULL);
+       }
+
+       spin_lock_bh(&gbe_dev->hw_stats_lock);
+
+       if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+               gbe_update_stats_ver14(gbe_dev, NULL);
+       else
+               gbe_update_stats(gbe_dev, NULL);
+
+       spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
+       gbe_dev->timer.expires  = jiffies + GBE_TIMER_INTERVAL;
+       add_timer(&gbe_dev->timer);
+}
+
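+/* The timer re-arms itself, so link state is polled and the 32-bit hardware
+ * counters are folded into the 64-bit soft copies every GBE_TIMER_INTERVAL,
+ * which (assuming the interval is short enough) keeps the hardware counters
+ * from wrapping between ethtool reads.
+ */
+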
+static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
+{
+       struct gbe_intf *gbe_intf = data;
+
+       p_info->tx_pipe = &gbe_intf->tx_pipe;
+       return 0;
+}
+
+static int gbe_open(void *intf_priv, struct net_device *ndev)
+{
+       struct gbe_intf *gbe_intf = intf_priv;
+       struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct gbe_slave *slave = gbe_intf->slave;
+       int port_num = slave->port_num;
+       u32 reg;
+       int ret;
+
+       reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
+       dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
+               GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
+               GBE_RTL_VERSION(reg), GBE_IDENT(reg));
+
+       if (gbe_dev->enable_ale)
+               gbe_intf->tx_pipe.dma_psflags = 0;
+       else
+               gbe_intf->tx_pipe.dma_psflags = port_num;
+
+       dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n",
+               gbe_intf->tx_pipe.dma_chan_name,
+               gbe_intf->tx_pipe.dma_channel,
+               gbe_intf->tx_pipe.dma_psflags);
+
+       gbe_slave_stop(gbe_intf);
+
+       /* disable priority elevation and enable statistics on all ports */
+       writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
+
+       /* Control register */
+       writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
+
+       /* All statistics enabled and STAT AB visible by default */
+       writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs,
+                                                        stat_port_en));
+
+       ret = gbe_slave_open(gbe_intf);
+       if (ret)
+               goto fail;
+
+       netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
+                             gbe_intf);
+
+       slave->open = true;
+       netcp_ethss_update_link_state(gbe_dev, slave, ndev);
+       return 0;
+
+fail:
+       gbe_slave_stop(gbe_intf);
+       return ret;
+}
+
+static int gbe_close(void *intf_priv, struct net_device *ndev)
+{
+       struct gbe_intf *gbe_intf = intf_priv;
+       struct netcp_intf *netcp = netdev_priv(ndev);
+
+       gbe_slave_stop(gbe_intf);
+       netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
+                               gbe_intf);
+
+       gbe_intf->slave->open = false;
+       atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
+       return 0;
+}
+
+static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
+                     struct device_node *node)
+{
+       int port_reg_num;
+       u32 port_reg_ofs, emac_reg_ofs;
+
+       if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
+               dev_err(gbe_dev->dev, "missing slave-port parameter\n");
+               return -EINVAL;
+       }
+
+       if (of_property_read_u32(node, "link-interface",
+                                &slave->link_interface)) {
+               dev_warn(gbe_dev->dev,
+                        "missing link-interface value, defaulting to 1G mac-phy link\n");
+               slave->link_interface = SGMII_LINK_MAC_PHY;
+       }
+
+       slave->open = false;
+       slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
+       slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
+
+       if (slave->link_interface >= XGMII_LINK_MAC_PHY)
+               slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
+       else
+               slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
+
+       /* EMAC regs memory map is contiguous but port regs are not */
+       port_reg_num = slave->slave_num;
+       if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
+               if (slave->slave_num > 1) {
+                       port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
+                       port_reg_num -= 2;
+               } else {
+                       port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
+               }
+       } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
+               port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
+       } else {
+               dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
+                       gbe_dev->ss_version);
+               return -EINVAL;
+       }
+
+       if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+               emac_reg_ofs = GBE13_EMAC_OFFSET;
+       else if (gbe_dev->ss_version == XGBE_SS_VERSION_10)
+               emac_reg_ofs = XGBE10_EMAC_OFFSET;
+
+       slave->port_regs = gbe_dev->ss_regs + port_reg_ofs +
+                               (0x30 * port_reg_num);
+       slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs +
+                               (0x40 * slave->slave_num);
+
+       if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
+               /* Initialize slave port register offsets */
+               GBE_SET_REG_OFS(slave, port_regs, port_vlan);
+               GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
+               GBE_SET_REG_OFS(slave, port_regs, sa_lo);
+               GBE_SET_REG_OFS(slave, port_regs, sa_hi);
+               GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
+               GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+               GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
+               GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+               GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
+
+               /* Initialize EMAC register offsets */
+               GBE_SET_REG_OFS(slave, emac_regs, mac_control);
+               GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
+               GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
+
+       } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
+               /* Initialize slave port register offsets */
+               XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
+               XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
+               XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
+               XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
+               XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
+               XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+               XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
+               XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+               XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
+
+               /* Initialize EMAC register offsets */
+               XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
+               XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
+               XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
+       }
+
+       atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
+       return 0;
+}
+
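+/* Address computation example for init_slave() (illustrative, version 1.4):
+ * slave 2 selects port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET with port_reg_num
+ * rebased to 0, giving
+ *
+ *	port_regs = ss_regs + GBE13_SLAVE_PORT2_OFFSET + 0x30 * 0
+ *	emac_regs = ss_regs + GBE13_EMAC_OFFSET + 0x40 * 2
+ */
+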
+static void init_secondary_ports(struct gbe_priv *gbe_dev,
+                                struct device_node *node)
+{
+       struct device *dev = gbe_dev->dev;
+       phy_interface_t phy_mode;
+       struct gbe_priv **priv;
+       struct device_node *port;
+       struct gbe_slave *slave;
+       bool mac_phy_link = false;
+
+       for_each_child_of_node(node, port) {
+               slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
+               if (!slave) {
+                       dev_err(dev,
+                               "memory alloc failed for secondary port(%s), skipping...\n",
+                               port->name);
+                       continue;
+               }
+
+               if (init_slave(gbe_dev, slave, port)) {
+                       dev_err(dev,
+                               "Failed to initialize secondary port(%s), skipping...\n",
+                               port->name);
+                       devm_kfree(dev, slave);
+                       continue;
+               }
+
+               gbe_sgmii_config(gbe_dev, slave);
+               gbe_port_reset(slave);
+               gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
+               list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
+               gbe_dev->num_slaves++;
+               if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
+                   (slave->link_interface == XGMII_LINK_MAC_PHY))
+                       mac_phy_link = true;
+
+               slave->open = true;
+       }
+
+       /* of_phy_connect() is needed only for MAC-PHY interface */
+       if (!mac_phy_link)
+               return;
+
+       /* Allocate dummy netdev device for attaching to phy device */
+       gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
+                                       NET_NAME_UNKNOWN, ether_setup);
+       if (!gbe_dev->dummy_ndev) {
+               dev_err(dev,
+                       "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
+               return;
+       }
+       priv = netdev_priv(gbe_dev->dummy_ndev);
+       *priv = gbe_dev;
+
+       for_each_sec_slave(slave, gbe_dev) {
+               if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+                   (slave->link_interface != XGMII_LINK_MAC_PHY))
+                       continue;
+
+               /* pick the PHY mode from this slave's own link-interface,
+                * not from whichever secondary port was probed last
+                */
+               if (slave->link_interface == SGMII_LINK_MAC_PHY) {
+                       phy_mode = PHY_INTERFACE_MODE_SGMII;
+                       slave->phy_port_t = PORT_MII;
+               } else {
+                       phy_mode = PHY_INTERFACE_MODE_NA;
+                       slave->phy_port_t = PORT_FIBRE;
+               }
+               slave->phy =
+                       of_phy_connect(gbe_dev->dummy_ndev,
+                                      slave->phy_node,
+                                      gbe_adjust_link_sec_slaves,
+                                      0, phy_mode);
+               if (!slave->phy) {
+                       dev_err(dev, "phy not found for slave %d\n",
+                               slave->slave_num);
+               } else {
+                       dev_dbg(dev, "phy found: id is %s\n",
+                               dev_name(&slave->phy->dev));
+                       phy_start(slave->phy);
+                       phy_read_status(slave->phy);
+               }
+       }
+}
+
+static void free_secondary_ports(struct gbe_priv *gbe_dev)
+{
+       struct gbe_slave *slave;
+
+       while (!list_empty(&gbe_dev->secondary_slaves)) {
+               slave = first_sec_slave(gbe_dev);
+
+               if (slave->phy)
+                       phy_disconnect(slave->phy);
+               list_del(&slave->slave_list);
+       }
+       if (gbe_dev->dummy_ndev)
+               free_netdev(gbe_dev->dummy_ndev);
+}
+
+static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
+                                struct device_node *node)
+{
+       struct resource res;
+       void __iomem *regs;
+       int ret, i;
+
+       ret = of_address_to_resource(node, 0, &res);
+       if (ret) {
+               dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n",
+                       node->name);
+               return ret;
+       }
+
+       regs = devm_ioremap_resource(gbe_dev->dev, &res);
+       if (IS_ERR(regs)) {
+               dev_err(gbe_dev->dev, "Failed to map xgbe register base\n");
+               return PTR_ERR(regs);
+       }
+       gbe_dev->ss_regs = regs;
+
+       ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
+       if (ret) {
+               dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n",
+                       node->name);
+               return ret;
+       }
+
+       regs = devm_ioremap_resource(gbe_dev->dev, &res);
+       if (IS_ERR(regs)) {
+               dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
+               return PTR_ERR(regs);
+       }
+       gbe_dev->xgbe_serdes_regs = regs;
+
+       gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
+                                         XGBE10_NUM_STAT_ENTRIES *
+                                         (XGBE10_NUM_SLAVES + 1) * sizeof(u64),
+                                         GFP_KERNEL);
+       if (!gbe_dev->hw_stats) {
+               dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+               return -ENOMEM;
+       }
+
+       gbe_dev->ss_version = XGBE_SS_VERSION_10;
+       gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
+                                       XGBE10_SGMII_MODULE_OFFSET;
+       gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET;
+       gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
+
+       for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++)
+               gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs +
+                       XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
+
+       gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET;
+       gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS;
+       gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
+       gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
+       gbe_dev->et_stats = xgbe10_et_stats;
+       gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
+       /* Subsystem registers */
+       XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+       XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
+
+       /* Switch module registers */
+       XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+       XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
+       XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+       XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+       XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
+
+       /* Host port registers */
+       XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+       XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
+       XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+       return 0;
+}
+
+static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
+                                   struct device_node *node)
+{
+       struct resource res;
+       void __iomem *regs;
+       int ret;
+
+       ret = of_address_to_resource(node, 0, &res);
+       if (ret) {
+               dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n",
+                       node->name);
+               return ret;
+       }
+
+       regs = devm_ioremap_resource(gbe_dev->dev, &res);
+       if (IS_ERR(regs)) {
+               dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
+               return PTR_ERR(regs);
+       }
+       gbe_dev->ss_regs = regs;
+       gbe_dev->ss_version = readl(gbe_dev->ss_regs);
+       return 0;
+}
+
+static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
+                               struct device_node *node)
+{
+       void __iomem *regs;
+       int i;
+
+       gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
+                                         GBE13_NUM_HW_STAT_ENTRIES *
+                                         GBE13_NUM_SLAVES * sizeof(u64),
+                                         GFP_KERNEL);
+       if (!gbe_dev->hw_stats) {
+               dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+               return -ENOMEM;
+       }
+
+       regs = gbe_dev->ss_regs;
+       gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET;
+       gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET;
+       gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET;
+       gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET;
+
+       for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++)
+               gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET +
+                               (GBE_HW_STATS_REG_MAP_SZ * i);
+
+       gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET;
+       gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS;
+       gbe_dev->host_port = GBE13_HOST_PORT_NUM;
+       gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
+       gbe_dev->et_stats = gbe13_et_stats;
+       gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
+       /* Subsystem registers */
+       GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+
+       /* Switch module registers */
+       GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+       GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
+       GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
+       GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+       GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+       GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
+
+       /* Host port registers */
+       GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+       GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+       return 0;
+}
+
+static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
+                    struct device_node *node, void **inst_priv)
+{
+       struct device_node *interfaces, *interface;
+       struct device_node *secondary_ports;
+       struct cpsw_ale_params ale_params;
+       struct gbe_priv *gbe_dev;
+       u32 slave_num;
+       int ret = 0;
+
+       if (!node) {
+               dev_err(dev, "device tree info unavailable\n");
+               return -ENODEV;
+       }
+
+       gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
+       if (!gbe_dev)
+               return -ENOMEM;
+
+       gbe_dev->dev = dev;
+       gbe_dev->netcp_device = netcp_device;
+       gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
+
+       /* init the hw stats lock */
+       spin_lock_init(&gbe_dev->hw_stats_lock);
+
+       if (of_find_property(node, "enable-ale", NULL)) {
+               gbe_dev->enable_ale = true;
+               dev_info(dev, "ALE enabled\n");
+       } else {
+               gbe_dev->enable_ale = false;
+               dev_dbg(dev, "ALE bypass enabled*\n");
+       }
+
+       ret = of_property_read_u32(node, "tx-queue",
+                                  &gbe_dev->tx_queue_id);
+       if (ret < 0) {
+               dev_err(dev, "missing tx_queue parameter\n");
+               gbe_dev->tx_queue_id = GBE_TX_QUEUE;
+       }
+
+       ret = of_property_read_string(node, "tx-channel",
+                                     &gbe_dev->dma_chan_name);
+       if (ret < 0) {
+               dev_err(dev, "missing \"tx-channel\" parameter\n");
+               ret = -ENODEV;
+               goto quit;
+       }
+
+       if (!strcmp(node->name, "gbe")) {
+               ret = get_gbe_resource_version(gbe_dev, node);
+               if (ret)
+                       goto quit;
+
+               ret = set_gbe_ethss14_priv(gbe_dev, node);
+               if (ret)
+                       goto quit;
+       } else if (!strcmp(node->name, "xgbe")) {
+               ret = set_xgbe_ethss10_priv(gbe_dev, node);
+               if (ret)
+                       goto quit;
+               ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
+                                            gbe_dev->ss_regs);
+               if (ret)
+                       goto quit;
+       } else {
+               dev_err(dev, "unknown GBE node(%s)\n", node->name);
+               ret = -ENODEV;
+               goto quit;
+       }
+
+       interfaces = of_get_child_by_name(node, "interfaces");
+       if (!interfaces)
+               dev_err(dev, "could not find interfaces\n");
+
+       ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
+                               gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
+       if (ret)
+               goto quit;
+
+       ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
+       if (ret)
+               goto quit;
+
+       /* Create network interfaces */
+       INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
+       for_each_child_of_node(interfaces, interface) {
+               ret = of_property_read_u32(interface, "slave-port", &slave_num);
+               if (ret) {
+                       dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
+                               interface->name);
+                       continue;
+               }
+               gbe_dev->num_slaves++;
+       }
+
+       if (!gbe_dev->num_slaves)
+               dev_warn(dev, "No network interface configured\n");
+
+       /* Initialize Secondary slave ports */
+       secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
+       INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
+       if (secondary_ports)
+               init_secondary_ports(gbe_dev, secondary_ports);
+       of_node_put(secondary_ports);
+
+       if (!gbe_dev->num_slaves) {
+               dev_err(dev, "No network interface or secondary ports configured\n");
+               ret = -ENODEV;
+               goto quit;
+       }
+
+       memset(&ale_params, 0, sizeof(ale_params));
+       ale_params.dev          = gbe_dev->dev;
+       ale_params.ale_regs     = gbe_dev->ale_reg;
+       ale_params.ale_ageout   = GBE_DEFAULT_ALE_AGEOUT;
+       ale_params.ale_entries  = gbe_dev->ale_entries;
+       ale_params.ale_ports    = gbe_dev->ale_ports;
+
+       gbe_dev->ale = cpsw_ale_create(&ale_params);
+       if (!gbe_dev->ale) {
+               dev_err(gbe_dev->dev, "error initializing ale engine\n");
+               ret = -ENODEV;
+               goto quit;
+       } else {
+               dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
+       }
+
+       /* initialize host port */
+       gbe_init_host_port(gbe_dev);
+
+       init_timer(&gbe_dev->timer);
+       gbe_dev->timer.data      = (unsigned long)gbe_dev;
+       gbe_dev->timer.function = netcp_ethss_timer;
+       gbe_dev->timer.expires   = jiffies + GBE_TIMER_INTERVAL;
+       add_timer(&gbe_dev->timer);
+       *inst_priv = gbe_dev;
+       return 0;
+
+quit:
+       if (gbe_dev->hw_stats)
+               devm_kfree(dev, gbe_dev->hw_stats);
+       cpsw_ale_destroy(gbe_dev->ale);
+       if (gbe_dev->ss_regs)
+               devm_iounmap(dev, gbe_dev->ss_regs);
+       of_node_put(interfaces);
+       devm_kfree(dev, gbe_dev);
+       return ret;
+}
+
+static int gbe_attach(void *inst_priv, struct net_device *ndev,
+                     struct device_node *node, void **intf_priv)
+{
+       struct gbe_priv *gbe_dev = inst_priv;
+       struct gbe_intf *gbe_intf;
+       int ret;
+
+       if (!node) {
+               dev_err(gbe_dev->dev, "interface node not available\n");
+               return -ENODEV;
+       }
+
+       gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
+       if (!gbe_intf)
+               return -ENOMEM;
+
+       gbe_intf->ndev = ndev;
+       gbe_intf->dev = gbe_dev->dev;
+       gbe_intf->gbe_dev = gbe_dev;
+
+       gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
+                                       sizeof(*gbe_intf->slave),
+                                       GFP_KERNEL);
+       if (!gbe_intf->slave) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       if (init_slave(gbe_dev, gbe_intf->slave, node)) {
+               ret = -ENODEV;
+               goto fail;
+       }
+
+       gbe_intf->tx_pipe = gbe_dev->tx_pipe;
+       ndev->ethtool_ops = &keystone_ethtool_ops;
+       list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
+       *intf_priv = gbe_intf;
+       return 0;
+
+fail:
+       if (gbe_intf->slave)
+               devm_kfree(gbe_dev->dev, gbe_intf->slave);
+       if (gbe_intf)
+               devm_kfree(gbe_dev->dev, gbe_intf);
+       return ret;
+}
+
+static int gbe_release(void *intf_priv)
+{
+       struct gbe_intf *gbe_intf = intf_priv;
+
+       gbe_intf->ndev->ethtool_ops = NULL;
+       list_del(&gbe_intf->gbe_intf_list);
+       devm_kfree(gbe_intf->dev, gbe_intf->slave);
+       devm_kfree(gbe_intf->dev, gbe_intf);
+       return 0;
+}
+
+static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
+{
+       struct gbe_priv *gbe_dev = inst_priv;
+
+       del_timer_sync(&gbe_dev->timer);
+       cpsw_ale_stop(gbe_dev->ale);
+       cpsw_ale_destroy(gbe_dev->ale);
+       netcp_txpipe_close(&gbe_dev->tx_pipe);
+       free_secondary_ports(gbe_dev);
+
+       if (!list_empty(&gbe_dev->gbe_intf_head))
+               dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
+
+       devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
+       devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
+       memset(gbe_dev, 0x00, sizeof(*gbe_dev));
+       devm_kfree(gbe_dev->dev, gbe_dev);
+       return 0;
+}
+
+static struct netcp_module gbe_module = {
+       .name           = GBE_MODULE_NAME,
+       .owner          = THIS_MODULE,
+       .primary        = true,
+       .probe          = gbe_probe,
+       .open           = gbe_open,
+       .close          = gbe_close,
+       .remove         = gbe_remove,
+       .attach         = gbe_attach,
+       .release        = gbe_release,
+       .add_addr       = gbe_add_addr,
+       .del_addr       = gbe_del_addr,
+       .add_vid        = gbe_add_vid,
+       .del_vid        = gbe_del_vid,
+       .ioctl          = gbe_ioctl,
+};
+
+static struct netcp_module xgbe_module = {
+       .name           = XGBE_MODULE_NAME,
+       .owner          = THIS_MODULE,
+       .primary        = true,
+       .probe          = gbe_probe,
+       .open           = gbe_open,
+       .close          = gbe_close,
+       .remove         = gbe_remove,
+       .attach         = gbe_attach,
+       .release        = gbe_release,
+       .add_addr       = gbe_add_addr,
+       .del_addr       = gbe_del_addr,
+       .add_vid        = gbe_add_vid,
+       .del_vid        = gbe_del_vid,
+       .ioctl          = gbe_ioctl,
+};
+
+static int __init keystone_gbe_init(void)
+{
+       int ret;
+
+       ret = netcp_register_module(&gbe_module);
+       if (ret)
+               return ret;
+
+       ret = netcp_register_module(&xgbe_module);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+module_init(keystone_gbe_init);
+
+static void __exit keystone_gbe_exit(void)
+{
+       netcp_unregister_module(&gbe_module);
+       netcp_unregister_module(&xgbe_module);
+}
+module_exit(keystone_gbe_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
new file mode 100644 (file)
index 0000000..dbeb142
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * SGMI module initialisation
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:    Sandeep Nair <sandeep_n@ti.com>
+ *             Sandeep Paulraj <s-paulraj@ti.com>
+ *             Wingman Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "netcp.h"
+
+#define SGMII_REG_STATUS_LOCK          BIT(4)
+#define        SGMII_REG_STATUS_LINK           BIT(0)
+#define SGMII_REG_STATUS_AUTONEG       BIT(2)
+#define SGMII_REG_CONTROL_AUTONEG      BIT(0)
+
+#define SGMII23_OFFSET(x)      ((x - 2) * 0x100)
+#define SGMII_OFFSET(x)                ((x <= 1) ? (x * 0x100) : (SGMII23_OFFSET(x)))
+
+/* SGMII registers */
+#define SGMII_SRESET_REG(x)   (SGMII_OFFSET(x) + 0x004)
+#define SGMII_CTL_REG(x)      (SGMII_OFFSET(x) + 0x010)
+#define SGMII_STATUS_REG(x)   (SGMII_OFFSET(x) + 0x014)
+#define SGMII_MRADV_REG(x)    (SGMII_OFFSET(x) + 0x018)
+
+static void sgmii_write_reg(void __iomem *base, int reg, u32 val)
+{
+       writel(val, base + reg);
+}
+
+static u32 sgmii_read_reg(void __iomem *base, int reg)
+{
+       return readl(base + reg);
+}
+
+static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
+{
+       writel((readl(base + reg) | val), base + reg);
+}
+
+/* port is 0 based */
+int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
+{
+       /* Soft reset */
+       sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
+       while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
+               ;
+       return 0;
+}
+
+int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
+{
+       u32 status = 0, link = 0;
+
+       status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+       if ((status & SGMII_REG_STATUS_LINK) != 0)
+               link = 1;
+       return link;
+}
+
+int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface)
+{
+       unsigned int i, status, mask;
+       u32 mr_adv_ability;
+       u32 control;
+
+       switch (interface) {
+       case SGMII_LINK_MAC_MAC_AUTONEG:
+               mr_adv_ability  = 0x9801;
+               control         = 0x21;
+               break;
+
+       case SGMII_LINK_MAC_PHY:
+       case SGMII_LINK_MAC_PHY_NO_MDIO:
+               mr_adv_ability  = 1;
+               control         = 1;
+               break;
+
+       case SGMII_LINK_MAC_MAC_FORCED:
+               mr_adv_ability  = 0x9801;
+               control         = 0x20;
+               break;
+
+       case SGMII_LINK_MAC_FIBER:
+               mr_adv_ability  = 0x20;
+               control         = 0x1;
+               break;
+
+       default:
+               WARN_ONCE(1, "Invalid sgmii interface: %d\n", interface);
+               return -EINVAL;
+       }
+
+       sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), 0);
+
+       /* Wait for the SerDes pll to lock */
+       for (i = 0; i < 1000; i++)  {
+               usleep_range(1000, 2000);
+               status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+               if ((status & SGMII_REG_STATUS_LOCK) != 0)
+                       break;
+       }
+
+       if ((status & SGMII_REG_STATUS_LOCK) == 0)
+               pr_err("serdes PLL not locked\n");
+
+       sgmii_write_reg(sgmii_ofs, SGMII_MRADV_REG(port), mr_adv_ability);
+       sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), control);
+
+       mask = SGMII_REG_STATUS_LINK;
+       if (control & SGMII_REG_CONTROL_AUTONEG)
+               mask |= SGMII_REG_STATUS_AUTONEG;
+
+       for (i = 0; i < 1000; i++)  {
+               usleep_range(200, 500);
+               status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+               if ((status & mask) == mask)
+                       break;
+       }
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
new file mode 100644 (file)
index 0000000..33571ac
--- /dev/null
@@ -0,0 +1,501 @@
+/*
+ * XGE PCSR module initialisation
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:    Sandeep Nair <sandeep_n@ti.com>
+ *             WingMan Kwok <w-kwok2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "netcp.h"
+
+/* XGBE registers */
+#define XGBE_CTRL_OFFSET               0x0c
+#define XGBE_SGMII_1_OFFSET            0x0114
+#define XGBE_SGMII_2_OFFSET            0x0214
+
+/* PCS-R registers */
+#define PCSR_CPU_CTRL_OFFSET           0x1fd0
+#define POR_EN                         BIT(29)
+
+#define reg_rmw(addr, value, mask) \
+       writel(((readl(addr) & (~(mask))) | \
+                       (value & (mask))), (addr))
+
+/* bit mask of width w at offset s */
+#define MASK_WID_SH(w, s)              (((1 << w) - 1) << s)
+
+/* shift value v to offset s */
+#define VAL_SH(v, s)                   (v << s)
+
+#define PHY_A(serdes)                  0
+
+struct serdes_cfg {
+       u32 ofs;
+       u32 val;
+       u32 mask;
+};
+
+static struct serdes_cfg cfg_phyb_1p25g_156p25mhz_cmu0[] = {
+       {0x0000, 0x00800002, 0x00ff00ff},
+       {0x0014, 0x00003838, 0x0000ffff},
+       {0x0060, 0x1c44e438, 0xffffffff},
+       {0x0064, 0x00c18400, 0x00ffffff},
+       {0x0068, 0x17078200, 0xffffff00},
+       {0x006c, 0x00000014, 0x000000ff},
+       {0x0078, 0x0000c000, 0x0000ff00},
+       {0x0000, 0x00000003, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_156p25mhz_cmu1[] = {
+       {0x0c00, 0x00030002, 0x00ff00ff},
+       {0x0c14, 0x00005252, 0x0000ffff},
+       {0x0c28, 0x80000000, 0xff000000},
+       {0x0c2c, 0x000000f6, 0x000000ff},
+       {0x0c3c, 0x04000405, 0xff00ffff},
+       {0x0c40, 0xc0800000, 0xffff0000},
+       {0x0c44, 0x5a202062, 0xffffffff},
+       {0x0c48, 0x40040424, 0xffffffff},
+       {0x0c4c, 0x00004002, 0x0000ffff},
+       {0x0c50, 0x19001c00, 0xff00ff00},
+       {0x0c54, 0x00002100, 0x0000ff00},
+       {0x0c58, 0x00000060, 0x000000ff},
+       {0x0c60, 0x80131e7c, 0xffffffff},
+       {0x0c64, 0x8400cb02, 0xff00ffff},
+       {0x0c68, 0x17078200, 0xffffff00},
+       {0x0c6c, 0x00000016, 0x000000ff},
+       {0x0c74, 0x00000400, 0x0000ff00},
+       {0x0c78, 0x0000c000, 0x0000ff00},
+       {0x0c00, 0x00000003, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_16bit_lane[] = {
+       {0x0204, 0x00000080, 0x000000ff},
+       {0x0208, 0x0000920d, 0x0000ffff},
+       {0x0204, 0xfc000000, 0xff000000},
+       {0x0208, 0x00009104, 0x0000ffff},
+       {0x0210, 0x1a000000, 0xff000000},
+       {0x0214, 0x00006b58, 0x00ffffff},
+       {0x0218, 0x75800084, 0xffff00ff},
+       {0x022c, 0x00300000, 0x00ff0000},
+       {0x0230, 0x00003800, 0x0000ff00},
+       {0x024c, 0x008f0000, 0x00ff0000},
+       {0x0250, 0x30000000, 0xff000000},
+       {0x0260, 0x00000002, 0x000000ff},
+       {0x0264, 0x00000057, 0x000000ff},
+       {0x0268, 0x00575700, 0x00ffff00},
+       {0x0278, 0xff000000, 0xff000000},
+       {0x0280, 0x00500050, 0x00ff00ff},
+       {0x0284, 0x00001f15, 0x0000ffff},
+       {0x028c, 0x00006f00, 0x0000ff00},
+       {0x0294, 0x00000000, 0xffffff00},
+       {0x0298, 0x00002640, 0xff00ffff},
+       {0x029c, 0x00000003, 0x000000ff},
+       {0x02a4, 0x00000f13, 0x0000ffff},
+       {0x02a8, 0x0001b600, 0x00ffff00},
+       {0x0380, 0x00000030, 0x000000ff},
+       {0x03c0, 0x00000200, 0x0000ff00},
+       {0x03cc, 0x00000018, 0x000000ff},
+       {0x03cc, 0x00000000, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_comlane[] = {
+       {0x0a00, 0x00000800, 0x0000ff00},
+       {0x0a84, 0x00000000, 0x000000ff},
+       {0x0a8c, 0x00130000, 0x00ff0000},
+       {0x0a90, 0x77a00000, 0xffff0000},
+       {0x0a94, 0x00007777, 0x0000ffff},
+       {0x0b08, 0x000f0000, 0xffff0000},
+       {0x0b0c, 0x000f0000, 0x00ffffff},
+       {0x0b10, 0xbe000000, 0xff000000},
+       {0x0b14, 0x000000ff, 0x000000ff},
+       {0x0b18, 0x00000014, 0x000000ff},
+       {0x0b5c, 0x981b0000, 0xffff0000},
+       {0x0b64, 0x00001100, 0x0000ff00},
+       {0x0b78, 0x00000c00, 0x0000ff00},
+       {0x0abc, 0xff000000, 0xff000000},
+       {0x0ac0, 0x0000008b, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_cm_c1_c2[] = {
+       {0x0208, 0x00000000, 0x00000f00},
+       {0x0208, 0x00000000, 0x0000001f},
+       {0x0204, 0x00000000, 0x00040000},
+       {0x0208, 0x000000a0, 0x000000e0},
+};
+
+static void netcp_xgbe_serdes_cmu_init(void __iomem *serdes_regs)
+{
+       int i;
+
+       /* cmu0 setup */
+       for (i = 0; i < ARRAY_SIZE(cfg_phyb_1p25g_156p25mhz_cmu0); i++) {
+               reg_rmw(serdes_regs + cfg_phyb_1p25g_156p25mhz_cmu0[i].ofs,
+                       cfg_phyb_1p25g_156p25mhz_cmu0[i].val,
+                       cfg_phyb_1p25g_156p25mhz_cmu0[i].mask);
+       }
+
+       /* cmu1 setup */
+       for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_156p25mhz_cmu1); i++) {
+               reg_rmw(serdes_regs + cfg_phyb_10p3125g_156p25mhz_cmu1[i].ofs,
+                       cfg_phyb_10p3125g_156p25mhz_cmu1[i].val,
+                       cfg_phyb_10p3125g_156p25mhz_cmu1[i].mask);
+       }
+}
+
+/* lane is 0 based */
+static void netcp_xgbe_serdes_lane_config(
+                       void __iomem *serdes_regs, int lane)
+{
+       int i;
+
+       /* lane setup */
+       for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_16bit_lane); i++) {
+               reg_rmw(serdes_regs +
+                               cfg_phyb_10p3125g_16bit_lane[i].ofs +
+                               (0x200 * lane),
+                       cfg_phyb_10p3125g_16bit_lane[i].val,
+                       cfg_phyb_10p3125g_16bit_lane[i].mask);
+       }
+
+       /* disable auto negotiation*/
+       reg_rmw(serdes_regs + (0x200 * lane) + 0x0380,
+               0x00000000, 0x00000010);
+
+       /* disable link training */
+       reg_rmw(serdes_regs + (0x200 * lane) + 0x03c0,
+               0x00000000, 0x00000200);
+}
+
+static void netcp_xgbe_serdes_com_enable(void __iomem *serdes_regs)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_comlane); i++) {
+               reg_rmw(serdes_regs + cfg_phyb_10p3125g_comlane[i].ofs,
+                       cfg_phyb_10p3125g_comlane[i].val,
+                       cfg_phyb_10p3125g_comlane[i].mask);
+       }
+}
+
+static void netcp_xgbe_serdes_lane_enable(
+                       void __iomem *serdes_regs, int lane)
+{
+       /* Set Lane Control Rate */
+       writel(0xe0e9e038, serdes_regs + 0x1fe0 + (4 * lane));
+}
+
+static void netcp_xgbe_serdes_phyb_rst_clr(void __iomem *serdes_regs)
+{
+       reg_rmw(serdes_regs + 0x0a00, 0x0000001f, 0x000000ff);
+}
+
+static void netcp_xgbe_serdes_pll_disable(void __iomem *serdes_regs)
+{
+       writel(0x88000000, serdes_regs + 0x1ff4);
+}
+
+static void netcp_xgbe_serdes_pll_enable(void __iomem *serdes_regs)
+{
+       netcp_xgbe_serdes_phyb_rst_clr(serdes_regs);
+       writel(0xee000000, serdes_regs + 0x1ff4);
+}
+
+static int netcp_xgbe_wait_pll_locked(void __iomem *sw_regs)
+{
+       unsigned long timeout;
+       int ret = 0;
+       u32 val_1, val_0;
+
+       timeout = jiffies + msecs_to_jiffies(500);
+       do {
+               val_0 = (readl(sw_regs + XGBE_SGMII_1_OFFSET) & BIT(4));
+               val_1 = (readl(sw_regs + XGBE_SGMII_2_OFFSET) & BIT(4));
+
+               if (val_1 && val_0)
+                       return 0;
+
+               if (time_after(jiffies, timeout)) {
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               cpu_relax();
+       } while (true);
+
+       pr_err("XGBE serdes not locked: time out.\n");
+       return ret;
+}
+
+static void netcp_xgbe_serdes_enable_xgmii_port(void __iomem *sw_regs)
+{
+       writel(0x03, sw_regs + XGBE_CTRL_OFFSET);
+}
+
+static u32 netcp_xgbe_serdes_read_tbus_val(void __iomem *serdes_regs)
+{
+       u32 tmp;
+
+       if (PHY_A(serdes_regs)) {
+               tmp  = (readl(serdes_regs + 0x0ec) >> 24) & 0x0ff;
+               tmp |= ((readl(serdes_regs + 0x0fc) >> 16) & 0x00f00);
+       } else {
+               tmp  = (readl(serdes_regs + 0x0f8) >> 16) & 0x0fff;
+       }
+
+       return tmp;
+}
+
+static void netcp_xgbe_serdes_write_tbus_addr(void __iomem *serdes_regs,
+                                             int select, int ofs)
+{
+       if (PHY_A(serdes_regs)) {
+               reg_rmw(serdes_regs + 0x0008, ((select << 5) + ofs) << 24,
+                       ~0x00ffffff);
+               return;
+       }
+
+       /* For 2 lane Phy-B, lane0 is actually lane1 */
+       switch (select) {
+       case 1:
+               select = 2;
+               break;
+       case 2:
+               select = 3;
+               break;
+       default:
+               return;
+       }
+
+       reg_rmw(serdes_regs + 0x00fc, ((select << 8) + ofs) << 16, ~0xf800ffff);
+}
+
+static u32 netcp_xgbe_serdes_read_select_tbus(void __iomem *serdes_regs,
+                                             int select, int ofs)
+{
+       /* Set tbus address */
+       netcp_xgbe_serdes_write_tbus_addr(serdes_regs, select, ofs);
+       /* Get TBUS Value */
+       return netcp_xgbe_serdes_read_tbus_val(serdes_regs);
+}
+
+static void netcp_xgbe_serdes_reset_cdr(void __iomem *serdes_regs,
+                                       void __iomem *sig_detect_reg, int lane)
+{
+       u32 tmp, dlpf, tbus;
+
+       /*Get the DLPF values */
+       tmp = netcp_xgbe_serdes_read_select_tbus(
+                       serdes_regs, lane + 1, 5);
+
+       dlpf = tmp >> 2;
+
+       if (dlpf < 400 || dlpf > 700) {
+               reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1));
+               mdelay(1);
+               reg_rmw(sig_detect_reg, VAL_SH(0, 1), MASK_WID_SH(2, 1));
+       } else {
+               tbus = netcp_xgbe_serdes_read_select_tbus(serdes_regs, lane +
+                                                         1, 0xe);
+
+               pr_debug("XGBE: CDR centered, DLPF: %4d,%d,%d.\n",
+                        tmp >> 2, tmp & 3, (tbus >> 2) & 3);
+       }
+}
+
+/* Call every 100 ms */
+static int netcp_xgbe_check_link_status(void __iomem *serdes_regs,
+                                       void __iomem *sw_regs, u32 lanes,
+                                       u32 *current_state, u32 *lane_down)
+{
+       void __iomem *pcsr_base = sw_regs + 0x0600;
+       void __iomem *sig_detect_reg;
+       u32 pcsr_rx_stat, blk_lock, blk_errs;
+       int loss, i, status = 1;
+
+       for (i = 0; i < lanes; i++) {
+               /* Get the Loss bit */
+               loss = readl(serdes_regs + 0x1fc0 + 0x20 + (i * 0x04)) & 0x1;
+
+               /* Get Block Errors and Block Lock bits */
+               pcsr_rx_stat = readl(pcsr_base + 0x0c + (i * 0x80));
+               blk_lock = (pcsr_rx_stat >> 30) & 0x1;
+               blk_errs = (pcsr_rx_stat >> 16) & 0x0ff;
+
+               /* Get Signal Detect Overlay Address */
+               sig_detect_reg = serdes_regs + (i * 0x200) + 0x200 + 0x04;
+
+               /* If Block errors maxed out, attempt recovery! */
+               if (blk_errs == 0x0ff)
+                       blk_lock = 0;
+
+               switch (current_state[i]) {
+               case 0:
+                       /* if good link lock the signal detect ON! */
+                       if (!loss && blk_lock) {
+                               pr_debug("XGBE PCSR Linked Lane: %d\n", i);
+                               reg_rmw(sig_detect_reg, VAL_SH(3, 1),
+                                       MASK_WID_SH(2, 1));
+                               current_state[i] = 1;
+                       } else if (!blk_lock) {
+                               /* if no lock, then reset CDR */
+                               pr_debug("XGBE PCSR Recover Lane: %d\n", i);
+                               netcp_xgbe_serdes_reset_cdr(serdes_regs,
+                                                           sig_detect_reg, i);
+                       }
+                       break;
+
+               case 1:
+                       if (!blk_lock) {
+                               /* Link Lost? */
+                               lane_down[i] = 1;
+                               current_state[i] = 2;
+                       }
+                       break;
+
+               case 2:
+                       if (blk_lock)
+                               /* Nope just noise */
+                               current_state[i] = 1;
+                       else {
+                               /* Lost the block lock, reset CDR if it is
+                                * not centered and go back to sync state
+                                */
+                               netcp_xgbe_serdes_reset_cdr(serdes_regs,
+                                                           sig_detect_reg, i);
+                               current_state[i] = 0;
+                       }
+                       break;
+
+               default:
+                       pr_err("XGBE: unknown current_state[%d] %d\n",
+                              i, current_state[i]);
+                       break;
+               }
+
+               if (blk_errs > 0) {
+                       /* Reset the Error counts! */
+                       reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x19, 0),
+                               MASK_WID_SH(8, 0));
+
+                       reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x00, 0),
+                               MASK_WID_SH(8, 0));
+               }
+
+               status &= (current_state[i] == 1);
+       }
+
+       return status;
+}
+
+static int netcp_xgbe_serdes_check_lane(void __iomem *serdes_regs,
+                                       void __iomem *sw_regs)
+{
+       u32 current_state[2] = {0, 0};
+       int retries = 0, link_up;
+       u32 lane_down[2];
+
+       do {
+               lane_down[0] = 0;
+               lane_down[1] = 0;
+
+               link_up = netcp_xgbe_check_link_status(serdes_regs, sw_regs, 2,
+                                                      current_state,
+                                                      lane_down);
+
+               /* if we did not get link up then wait 100ms before calling
+                * it again
+                */
+               if (link_up)
+                       break;
+
+               if (lane_down[0])
+                       pr_debug("XGBE: detected link down on lane 0\n");
+
+               if (lane_down[1])
+                       pr_debug("XGBE: detected link down on lane 1\n");
+
+               if (++retries > 1) {
+                       pr_debug("XGBE: timeout waiting for serdes link up\n");
+                       return -ETIMEDOUT;
+               }
+               mdelay(100);
+       } while (!link_up);
+
+       pr_debug("XGBE: PCSR link is up\n");
+       return 0;
+}
+
+static void netcp_xgbe_serdes_setup_cm_c1_c2(void __iomem *serdes_regs,
+                                            int lane, int cm, int c1, int c2)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(cfg_cm_c1_c2); i++) {
+               reg_rmw(serdes_regs + cfg_cm_c1_c2[i].ofs + (0x200 * lane),
+                       cfg_cm_c1_c2[i].val,
+                       cfg_cm_c1_c2[i].mask);
+       }
+}
+
+static void netcp_xgbe_reset_serdes(void __iomem *serdes_regs)
+{
+       /* Toggle the POR_EN bit in CONFIG.CPU_CTRL */
+       /* enable POR_EN bit */
+       reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, POR_EN, POR_EN);
+       usleep_range(10, 100);
+
+       /* disable POR_EN bit */
+       reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, 0, POR_EN);
+       usleep_range(10, 100);
+}
+
+static int netcp_xgbe_serdes_config(void __iomem *serdes_regs,
+                                   void __iomem *sw_regs)
+{
+       u32 ret, i;
+
+       netcp_xgbe_serdes_pll_disable(serdes_regs);
+       netcp_xgbe_serdes_cmu_init(serdes_regs);
+
+       for (i = 0; i < 2; i++)
+               netcp_xgbe_serdes_lane_config(serdes_regs, i);
+
+       netcp_xgbe_serdes_com_enable(serdes_regs);
+       /* This is EVM + RTM-BOC specific */
+       for (i = 0; i < 2; i++)
+               netcp_xgbe_serdes_setup_cm_c1_c2(serdes_regs, i, 0, 0, 5);
+
+       netcp_xgbe_serdes_pll_enable(serdes_regs);
+       for (i = 0; i < 2; i++)
+               netcp_xgbe_serdes_lane_enable(serdes_regs, i);
+
+       /* SB PLL Status Poll */
+       ret = netcp_xgbe_wait_pll_locked(sw_regs);
+       if (ret)
+               return ret;
+
+       netcp_xgbe_serdes_enable_xgmii_port(sw_regs);
+       netcp_xgbe_serdes_check_lane(serdes_regs, sw_regs);
+       return ret;
+}
+
+int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs)
+{
+       u32 val;
+
+       /* read COMLANE bits 4:0 */
+       val = readl(serdes_regs + 0xa00);
+       if (val & 0x1f) {
+               pr_debug("XGBE: serdes already in operation - reset\n");
+               netcp_xgbe_reset_serdes(serdes_regs);
+       }
+       return netcp_xgbe_serdes_config(serdes_regs, xgbe_regs);
+}
index f2ff0074aac9b272e4a136c033890abd2db12877..691ec936e88d53601e3ed257278708ae823670c8 100644 (file)
@@ -2540,7 +2540,7 @@ static void tlan_phy_power_down(struct net_device *dev)
         * This is abitrary.  It is intended to make sure the
         * transceiver settles.
         */
-       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
+       tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
 
 }
 
@@ -2561,7 +2561,7 @@ static void tlan_phy_power_up(struct net_device *dev)
         * transceiver.  The TLAN docs say both 50 ms and
         * 500 ms, so do the longer, just in case.
         */
-       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
+       tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
 
 }
 
@@ -2593,7 +2593,7 @@ static void tlan_phy_reset(struct net_device *dev)
         * I don't remember why I wait this long.
         * I've changed this to 50ms, as it seems long enough.
         */
-       tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
+       tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
 
 }
 
@@ -2658,7 +2658,7 @@ static void tlan_phy_start_link(struct net_device *dev)
                data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
                        | TLAN_NET_CFG_PHY_EN;
                tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
-               tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+               tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
                return;
        } else if (priv->phy_num == 0) {
                control = 0;
@@ -2725,7 +2725,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
            (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
            (priv->phy_num != 0)) {
                priv->phy_num = 0;
-               tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+               tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
                return;
        }
 
@@ -2744,7 +2744,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
 
        /* Wait for 100 ms.  No reason in partiticular.
         */
-       tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
+       tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);
 
 }
 
@@ -2796,7 +2796,7 @@ static void tlan_phy_monitor(unsigned long data)
                                /* set to external PHY */
                                priv->phy_num = 1;
                                /* restart autonegotiation */
-                               tlan_set_timer(dev, 4 * HZ / 10,
+                               tlan_set_timer(dev, msecs_to_jiffies(400),
                                               TLAN_TIMER_PHY_PDOWN);
                                return;
                        }
index a191afc23b56c72bbdc6bf3379b646ba2ec72fdf..17e276651601b27c393001cdd5593cddcd828788 100644 (file)
@@ -1326,7 +1326,8 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
 
-       mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
+       if (!rp->mii_if.force_media)
+               mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
 
        if (rp->mii_if.full_duplex)
            iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
@@ -1781,8 +1782,8 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
        rp->tx_ring[entry].desc_length =
                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
-       if (unlikely(vlan_tx_tag_present(skb))) {
-               u16 vid_pcp = vlan_tx_tag_get(skb);
+       if (unlikely(skb_vlan_tag_present(skb))) {
+               u16 vid_pcp = skb_vlan_tag_get(skb);
 
                /* drop CFI/DEI bit, register needs VID and PCP */
                vid_pcp = (vid_pcp & VLAN_VID_MASK) |
@@ -1803,7 +1804,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 
        /* Non-x86 Todo: explicitly flush cache lines here. */
 
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
                BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
 
index 282f83a63b67a88fec08b2524ced2bbf36fc907a..c20206f83cc1fdf3445cfc41139724f0dbdbdd92 100644 (file)
@@ -2611,8 +2611,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 
        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
 
-       if (vlan_tx_tag_present(skb)) {
-               td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+       if (skb_vlan_tag_present(skb)) {
+               td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
                td_ptr->tdesc1.TCR |= TCR0_VETAG;
        }
 
index 9c2d91ea0af48e35020594b73221f4354fc6dc5e..dbcbf0c5bcfa910c49ec81037892a45487aa081e 100644 (file)
@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op)
        lp->regs = of_iomap(op->dev.of_node, 0);
        if (!lp->regs) {
                dev_err(&op->dev, "could not map temac regs.\n");
+               rc = -ENOMEM;
                goto nodev;
        }
 
@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op)
        np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
        if (!np) {
                dev_err(&op->dev, "could not find DMA node\n");
+               rc = -ENODEV;
                goto err_iounmap;
        }
 
index c18a0c637c444741f31c7f0acb05f17aff8fdd31..a6d2860b712c732c5459bea14647da9825ff1042 100644 (file)
@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op)
        lp->regs = of_iomap(op->dev.of_node, 0);
        if (!lp->regs) {
                dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+               ret = -ENOMEM;
                goto nodev;
        }
        /* Setup checksum offload, but default to off if not specified */
@@ -1563,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op)
        np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
        if (!np) {
                dev_err(&op->dev, "could not find DMA node\n");
+               ret = -ENODEV;
                goto err_iounmap;
        }
        lp->dma_regs = of_iomap(np, 0);
index 24858799c204fbe2640ad375b5ea75154b6aa795..9d4ce388510a5034b2f29d890645afdda73b23f0 100644 (file)
@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(dev, "no IRQ found\n");
+               rc = -ENXIO;
                goto error;
        }
 
index 9f49c0129a78a63f9a473012162ab74260f9449a..208eb05446baa4a6980620773865e3746a13848f 100644 (file)
@@ -217,7 +217,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
 static int netvsc_init_buf(struct hv_device *device)
 {
        int ret = 0;
-       int t;
+       unsigned long t;
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        struct net_device *ndev;
@@ -409,7 +409,8 @@ static int negotiate_nvsp_ver(struct hv_device *device,
                              struct nvsp_message *init_packet,
                              u32 nvsp_ver)
 {
-       int ret, t;
+       int ret;
+       unsigned long t;
 
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
@@ -684,9 +685,9 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
        return ret_val;
 }
 
-u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
-                           unsigned int section_index,
-                           struct hv_netvsc_packet *packet)
+static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+                                  unsigned int section_index,
+                                  struct hv_netvsc_packet *packet)
 {
        char *start = net_device->send_buf;
        char *dest = (start + (section_index * net_device->send_section_size));
@@ -716,7 +717,7 @@ int netvsc_send(struct hv_device *device,
        u64 req_id;
        unsigned int section_index = NETVSC_INVALID_INDEX;
        u32 msg_size = 0;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        u16 q_idx = packet->q_idx;
 
 
@@ -743,8 +744,6 @@ int netvsc_send(struct hv_device *device,
                                                           packet);
                        skb = (struct sk_buff *)
                              (unsigned long)packet->send_completion_tid;
-                       if (skb)
-                               dev_kfree_skb_any(skb);
                        packet->page_buf_cnt = 0;
                }
        }
@@ -810,6 +809,13 @@ int netvsc_send(struct hv_device *device,
                           packet, ret);
        }
 
+       if (ret != 0) {
+               if (section_index != NETVSC_INVALID_INDEX)
+                       netvsc_free_send_slot(net_device, section_index);
+       } else if (skb) {
+               dev_kfree_skb_any(skb);
+       }
+
        return ret;
 }
 
index ec0c40a8f653cb80e7c8a591db8975b79e4d6066..7816d98bdddc2b920598ba3c9f267304dca847bb 100644 (file)
@@ -470,7 +470,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
        struct rndis_query_request *query;
        struct rndis_query_complete *query_complete;
        int ret = 0;
-       int t;
+       unsigned long t;
 
        if (!result)
                return -EINVAL;
@@ -560,7 +560,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
        char macstr[2*ETH_ALEN+1];
        u32 extlen = sizeof(struct rndis_config_parameter_info) +
                2*NWADR_STRLEN + 4*ETH_ALEN;
-       int ret, t;
+       int ret;
+       unsigned long t;
 
        request = get_rndis_request(rdev, RNDIS_MSG_SET,
                RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
@@ -623,7 +624,8 @@ cleanup:
        return ret;
 }
 
-int rndis_filter_set_offload_params(struct hv_device *hdev,
+static int
+rndis_filter_set_offload_params(struct hv_device *hdev,
                                struct ndis_offload_params *req_offloads)
 {
        struct netvsc_device *nvdev = hv_get_drvdata(hdev);
@@ -634,7 +636,8 @@ int rndis_filter_set_offload_params(struct hv_device *hdev,
        struct ndis_offload_params *offload_params;
        struct rndis_set_complete *set_complete;
        u32 extlen = sizeof(struct ndis_offload_params);
-       int ret, t;
+       int ret;
+       unsigned long t;
        u32 vsp_version = nvdev->nvsp_version;
 
        if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
@@ -697,7 +700,7 @@ u8 netvsc_hash_key[HASH_KEYLEN] = {
        0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
 };
 
-int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
+static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
 {
        struct net_device *ndev = rdev->net_dev->ndev;
        struct rndis_request *request;
@@ -708,7 +711,8 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
        struct ndis_recv_scale_param *rssp;
        u32 *itab;
        u8 *keyp;
-       int i, t, ret;
+       int i, ret;
+       unsigned long t;
 
        request = get_rndis_request(
                        rdev, RNDIS_MSG_SET,
@@ -792,7 +796,8 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
        struct rndis_set_request *set;
        struct rndis_set_complete *set_complete;
        u32 status;
-       int ret, t;
+       int ret;
+       unsigned long t;
        struct net_device *ndev;
 
        ndev = dev->net_dev->ndev;
@@ -848,7 +853,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
        struct rndis_initialize_request *init;
        struct rndis_initialize_complete *init_complete;
        u32 status;
-       int ret, t;
+       int ret;
+       unsigned long t;
 
        request = get_rndis_request(dev, RNDIS_MSG_INIT,
                        RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -998,7 +1004,7 @@ int rndis_filter_device_add(struct hv_device *dev,
        struct netvsc_device_info *device_info = additional_info;
        struct ndis_offload_params offloads;
        struct nvsp_message *init_packet;
-       int t;
+       unsigned long t;
        struct ndis_recv_scale_cap rsscap;
        u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
        u32 mtu, size;
index 80632fc59756c0645ea904a4dd1493e4bdec7197..7b051eacb7f184a3770294b5b0d7d60aae5fbe2d 100644 (file)
@@ -427,7 +427,7 @@ at86rf230_reg_precious(struct device *dev, unsigned int reg)
        }
 }
 
-static struct regmap_config at86rf230_regmap_spi_config = {
+static const struct regmap_config at86rf230_regmap_spi_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .write_flag_mask = CMD_REG | CMD_WRITE,
index a43c8acb7268d6ef897f5a992101e0ee125231ff..181b349b060ee552dcc5be089c8cf616d7d965b2 100644 (file)
@@ -44,9 +44,9 @@
 #define        CC2520_FREG_MASK        0x3F
 
 /* status byte values */
-#define        CC2520_STATUS_XOSC32M_STABLE    (1 << 7)
-#define        CC2520_STATUS_RSSI_VALID        (1 << 6)
-#define        CC2520_STATUS_TX_UNDERFLOW      (1 << 3)
+#define        CC2520_STATUS_XOSC32M_STABLE    BIT(7)
+#define        CC2520_STATUS_RSSI_VALID        BIT(6)
+#define        CC2520_STATUS_TX_UNDERFLOW      BIT(3)
 
 /* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
 #define        CC2520_MINCHANNEL               11
@@ -549,14 +549,14 @@ cc2520_ed(struct ieee802154_hw *hw, u8 *level)
        u8 rssi;
        int ret;
 
-       ret = cc2520_read_register(priv , CC2520_RSSISTAT, &status);
+       ret = cc2520_read_register(priv, CC2520_RSSISTAT, &status);
        if (ret)
                return ret;
 
        if (status != RSSI_VALID)
                return -EINVAL;
 
-       ret = cc2520_read_register(priv , CC2520_RSSI, &rssi);
+       ret = cc2520_read_register(priv, CC2520_RSSI, &rssi);
        if (ret)
                return ret;
 
index a14d87783245a94986232fbbe20b64a57194359d..2a175006028b347efe1fbe6d145f311f78c80bd7 100644 (file)
@@ -9,7 +9,7 @@
 
 #include "ipvlan.h"
 
-static u32 ipvlan_jhash_secret;
+static u32 ipvlan_jhash_secret __read_mostly;
 
 void ipvlan_init_secret(void)
 {
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
        };
 
        dst = ip6_route_output(dev_net(dev), NULL, &fl6);
-       if (IS_ERR(dst))
+       if (dst->error) {
+               ret = dst->error;
+               dst_release(dst);
                goto err;
-
+       }
        skb_dst_drop(skb);
        skb_dst_set(skb, dst);
        err = ip6_local_out(skb);
index 58f98f4de773881a1af7fdb2a49a9bec69de6705..58ae11a14bb6f9ea51f322faaafee10b357ddddf 100644 (file)
@@ -1462,17 +1462,12 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
                if (mtt) 
                {
                        /* Check how much time we have used already */
-                       do_gettimeofday(&self->now);
-                       
-                       diff = self->now.tv_usec - self->stamp.tv_usec;
+                       diff = ktime_us_delta(ktime_get(), self->stamp);
                        /* self->stamp is set from ali_ircc_dma_receive_complete() */
                                                        
                        pr_debug("%s(), ******* diff = %d *******\n",
                                 __func__, diff);
-                       
-                       if (diff < 0) 
-                               diff += 1000000;
-                       
+
                        /* Check if the mtt is larger than the time we have
                         * already used by all the protocol processing
                         */
@@ -1884,7 +1879,7 @@ static int  ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
                         * reduce the min turn time a bit since we will know
                         * how much time we have used for protocol processing
                         */
-                       do_gettimeofday(&self->stamp);
+                       self->stamp = ktime_get();
 
                        skb = dev_alloc_skb(len+1);
                        if (skb == NULL)  
index 0c8edb41bd0a78355b2a82fa52f36f04a3d51547..c2d9747a5108cdeeee024aca7142951f3a15c2bb 100644 (file)
@@ -22,7 +22,7 @@
 #ifndef ALI_IRCC_H
 #define ALI_IRCC_H
 
-#include <linux/time.h>
+#include <linux/ktime.h>
 
 #include <linux/spinlock.h>
 #include <linux/pm.h>
@@ -209,8 +209,7 @@ struct ali_ircc_cb {
        
        unsigned char rcvFramesOverflow;
                
-       struct timeval stamp;
-       struct timeval now;
+       ktime_t stamp;
 
        spinlock_t lock;           /* For serializing operations */
        
index e151205281e21369c7b897d966d2b6b2606b6f74..44e4f386a5dc5e99757e329c8b105b7a0a3d8976 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/time.h>
 #include <linux/types.h>
 #include <linux/ioport.h>
 
@@ -163,8 +162,6 @@ struct au1k_private {
        iobuff_t rx_buff;
 
        struct net_device *netdev;
-       struct timeval stamp;
-       struct timeval now;
        struct qos_info qos;
        struct irlap_cb *irlap;
 
index 48b2f9a321b71c530c313fc5084e3c2c4d74318f..f6c916312577193cd32773cf20798f6509509770 100644 (file)
@@ -495,18 +495,12 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
                mtt = irda_get_mtt(skb);
                if (mtt) {
                        int diff;
-                       do_gettimeofday(&self->now);
-                       diff = self->now.tv_usec - self->stamp.tv_usec;
+                       diff = ktime_us_delta(ktime_get(), self->stamp);
 #ifdef IU_USB_MIN_RTT
                        /* Factor in USB delays -> Get rid of udelay() that
                         * would be lost in the noise - Jean II */
                        diff += IU_USB_MIN_RTT;
 #endif /* IU_USB_MIN_RTT */
-                       /* If the usec counter did wraparound, the diff will
-                        * go negative (tv_usec is a long), so we need to
-                        * correct it by one second. Jean II */
-                       if (diff < 0)
-                               diff += 1000000;
 
                        /* Check if the mtt is larger than the time we have
                         * already used by all the protocol processing
@@ -869,7 +863,7 @@ static void irda_usb_receive(struct urb *urb)
         * reduce the min turn time a bit since we will know
         * how much time we have used for protocol processing
         */
-        do_gettimeofday(&self->stamp);
+       self->stamp = ktime_get();
 
        /* Check if we need to copy the data to a new skb or not.
         * For most frames, we use ZeroCopy and pass the already
index 58ddb52149167cc2cb43d1bbbf23d0bf462694ad..8ac389fa93487c56f4745c5f61a59bd441064677 100644 (file)
@@ -29,7 +29,7 @@
  *
  *****************************************************************************/
 
-#include <linux/time.h>
+#include <linux/ktime.h>
 
 #include <net/irda/irda.h>
 #include <net/irda/irda_device.h>      /* struct irlap_cb */
@@ -157,8 +157,7 @@ struct irda_usb_cb {
        char *speed_buff;               /* Buffer for speed changes */
        char *tx_buff;
 
-       struct timeval stamp;
-       struct timeval now;
+       ktime_t stamp;
 
        spinlock_t lock;                /* For serializing Tx operations */
 
index e638893e98a9784173867899d0c3a695e2df53da..fb5d162ec7d2a563ae55b1fcbe6e64d989ddebd8 100644 (file)
@@ -114,7 +114,6 @@ struct kingsun_cb {
                                           (usually 8) */
 
        iobuff_t          rx_buff;      /* receive unwrap state machine */
-       struct timeval    rx_time;
        spinlock_t lock;
        int receiving;
 
@@ -235,7 +234,6 @@ static void kingsun_rcv_irq(struct urb *urb)
                                                  &kingsun->netdev->stats,
                                                  &kingsun->rx_buff, bytes[i]);
                        }
-                       do_gettimeofday(&kingsun->rx_time);
                        kingsun->receiving =
                                (kingsun->rx_buff.state != OUTSIDE_FRAME)
                                ? 1 : 0;
@@ -273,7 +271,6 @@ static int kingsun_net_open(struct net_device *netdev)
 
        skb_reserve(kingsun->rx_buff.skb, 1);
        kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
-       do_gettimeofday(&kingsun->rx_time);
 
        kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!kingsun->rx_urb)
index e6b3804edacd167c48f81d49d8161c3a61bc4ce9..8e6e0edf24406e266498760d1b3f4c66c19d677b 100644 (file)
@@ -187,7 +187,6 @@ struct ks959_cb {
        __u8 *rx_buf;
        __u8 rx_variable_xormask;
        iobuff_t rx_unwrap_buff;
-       struct timeval rx_time;
 
        struct usb_ctrlrequest *speed_setuprequest;
        struct urb *speed_urb;
@@ -476,7 +475,6 @@ static void ks959_rcv_irq(struct urb *urb)
                                                  bytes[i]);
                        }
                }
-               do_gettimeofday(&kingsun->rx_time);
                kingsun->receiving =
                    (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
        }
@@ -514,7 +512,6 @@ static int ks959_net_open(struct net_device *netdev)
 
        skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
        kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
-       do_gettimeofday(&kingsun->rx_time);
 
        kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!kingsun->rx_urb)
index e4d678fbeb2f079f54ea88e07cd6e3a4391167b0..bca6a1e72d1d33ea2fd3e530cbbe516890f4eed5 100644 (file)
@@ -722,7 +722,6 @@ static int mcs_net_open(struct net_device *netdev)
 
        skb_reserve(mcs->rx_buff.skb, 1);
        mcs->rx_buff.head = mcs->rx_buff.skb->data;
-       do_gettimeofday(&mcs->rx_time);
 
        /*
         * Now that everything should be initialized properly,
@@ -799,7 +798,6 @@ static void mcs_receive_irq(struct urb *urb)
                        mcs_unwrap_fir(mcs, urb->transfer_buffer,
                                urb->actual_length);
                }
-               do_gettimeofday(&mcs->rx_time);
        }
 
        ret = usb_submit_urb(urb, GFP_ATOMIC);
index b10689b2887c8a39ae9fe7c390e4382865b3688f..a6e8f7dbafc9504a51d1996a0fc6ebb5320e33cc 100644 (file)
@@ -116,7 +116,6 @@ struct mcs_cb {
        __u8 *fifo_status;
 
        iobuff_t rx_buff;       /* receive unwrap state machine */
-       struct timeval rx_time;
        spinlock_t lock;
        int receiving;
 
index e7317b104bfbafa9daf58ca5d1e540de8d0839a9..dc0dbd8dd0b5b293d05c1bfd121410a0be35dc42 100644 (file)
@@ -1501,10 +1501,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
                mtt = irda_get_mtt(skb);
                if (mtt) {
                        /* Check how much time we have used already */
-                       do_gettimeofday(&self->now);
-                       diff = self->now.tv_usec - self->stamp.tv_usec;
-                       if (diff < 0) 
-                               diff += 1000000;
+                       diff = ktime_us_delta(ktime_get(), self->stamp);
                        
                        /* Check if the mtt is larger than the time we have
                         * already used by all the protocol processing
@@ -1867,7 +1864,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
                         * reduce the min turn time a bit since we will know
                         * how much time we have used for protocol processing
                         */
-                       do_gettimeofday(&self->stamp);
+                       self->stamp = ktime_get();
 
                        skb = dev_alloc_skb(len+1);
                        if (skb == NULL)  {
index 32fa58211fadc19f0c9df01a9f8c544f3ea963a3..7be5acb56532e0f0f9ba6ba0641487d0e73f1695 100644 (file)
@@ -28,7 +28,7 @@
 #ifndef NSC_IRCC_H
 #define NSC_IRCC_H
 
-#include <linux/time.h>
+#include <linux/ktime.h>
 
 #include <linux/spinlock.h>
 #include <linux/pm.h>
@@ -263,8 +263,7 @@ struct nsc_ircc_cb {
 
        __u8 ier;                  /* Interrupt enable register */
 
-       struct timeval stamp;
-       struct timeval now;
+       ktime_t stamp;
 
        spinlock_t lock;           /* For serializing operations */
        
index dd1bd1060ec983667acdcf02f2d255a3e6f055ef..83cc48a01802b76fe2a53923e13263cea80e484f 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/moduleparam.h>
 
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/types.h>
 #include <linux/time.h>
 #include <linux/skbuff.h>
@@ -174,7 +175,7 @@ struct stir_cb {
        __u8              *fifo_status;
 
        iobuff_t          rx_buff;      /* receive unwrap state machine */
-       struct timeval    rx_time;
+       ktime_t         rx_time;
        int               receiving;
        struct urb       *rx_urb;
 };
@@ -650,15 +651,12 @@ static int fifo_txwait(struct stir_cb *stir, int space)
 static void turnaround_delay(const struct stir_cb *stir, long us)
 {
        long ticks;
-       struct timeval now;
 
        if (us <= 0)
                return;
 
-       do_gettimeofday(&now);
-       if (now.tv_sec - stir->rx_time.tv_sec > 0)
-               us -= USEC_PER_SEC;
-       us -= now.tv_usec - stir->rx_time.tv_usec;
+       us -= ktime_us_delta(ktime_get(), stir->rx_time);
+
        if (us < 10)
                return;
 
@@ -823,8 +821,8 @@ static void stir_rcv_irq(struct urb *urb)
                pr_debug("receive %d\n", urb->actual_length);
                unwrap_chars(stir, urb->transfer_buffer,
                             urb->actual_length);
-               
-               do_gettimeofday(&stir->rx_time);
+
+               stir->rx_time = ktime_get();
        }
 
        /* kernel thread is stopping receiver don't resubmit */
@@ -876,7 +874,7 @@ static int stir_net_open(struct net_device *netdev)
 
        skb_reserve(stir->rx_buff.skb, 1);
        stir->rx_buff.head = stir->rx_buff.skb->data;
-       do_gettimeofday(&stir->rx_time);
+       stir->rx_time = ktime_get();
 
        stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!stir->rx_urb) 
index 7ce820ecc361519243a98f2f150eb145b2715f74..ac1525573398fbf851f8b8dd7783669bffa8c843 100644 (file)
@@ -29,7 +29,6 @@ this program; if not, see <http://www.gnu.org/licenses/>.
  ********************************************************************/
 #ifndef via_IRCC_H
 #define via_IRCC_H
-#include <linux/time.h>
 #include <linux/spinlock.h>
 #include <linux/pm.h>
 #include <linux/types.h>
@@ -106,9 +105,6 @@ struct via_ircc_cb {
 
        __u8 ier;               /* Interrupt enable register */
 
-       struct timeval stamp;
-       struct timeval now;
-
        spinlock_t lock;        /* For serializing operations */
 
        __u32 flags;            /* Interface flags */
index ac39d9f33d5fcf9734f889123b881c14d2716d8c..a0849f49bbec7401c3c6a366e0ca5c8d98fb651f 100644 (file)
@@ -33,6 +33,7 @@ MODULE_LICENSE("GPL");
 /********************************************************/
 
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
@@ -40,9 +41,9 @@ MODULE_LICENSE("GPL");
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/delay.h>
-#include <linux/time.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/math64.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
@@ -180,8 +181,7 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
        vlsi_irda_dev_t *idev = netdev_priv(ndev);
        u8 byte;
        u16 word;
-       unsigned delta1, delta2;
-       struct timeval now;
+       s32 sec, usec;
        unsigned iobase = ndev->base_addr;
 
        seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
@@ -277,17 +277,9 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
        seq_printf(seq, "\nsw-state:\n");
        seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 
                (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
-       do_gettimeofday(&now);
-       if (now.tv_usec >= idev->last_rx.tv_usec) {
-               delta2 = now.tv_usec - idev->last_rx.tv_usec;
-               delta1 = 0;
-       }
-       else {
-               delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
-               delta1 = 1;
-       }
-       seq_printf(seq, "last rx: %lu.%06u sec\n",
-               now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);    
+       sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
+                         USEC_PER_SEC, &usec);
+       seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec);
 
        seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
                ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
@@ -661,7 +653,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
                }
        }
 
-       do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
+       idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */
 
        vlsi_fill_rx(r);
 
@@ -858,9 +850,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
        unsigned iobase = ndev->base_addr;
        u8 status;
        u16 config;
-       int mtt;
+       int mtt, diff;
        int len, speed;
-       struct timeval  now, ready;
        char *msg = NULL;
 
        speed = irda_get_next_speed(skb);
@@ -940,21 +931,10 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
        spin_unlock_irqrestore(&idev->lock, flags);
 
        if ((mtt = irda_get_mtt(skb)) > 0) {
-       
-               ready.tv_usec = idev->last_rx.tv_usec + mtt;
-               ready.tv_sec = idev->last_rx.tv_sec;
-               if (ready.tv_usec >= 1000000) {
-                       ready.tv_usec -= 1000000;
-                       ready.tv_sec++;         /* IrLAP 1.1: mtt always < 1 sec */
-               }
-               for(;;) {
-                       do_gettimeofday(&now);
-                       if (now.tv_sec > ready.tv_sec ||
-                           (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
-                               break;
-                       udelay(100);
+               diff = ktime_us_delta(ktime_get(), idev->last_rx);
+               if (mtt > diff)
+                       udelay(mtt - diff);
                        /* must not sleep here - called under netif_tx_lock! */
-               }
        }
 
        /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
@@ -1333,7 +1313,7 @@ static int vlsi_start_hw(vlsi_irda_dev_t *idev)
 
        vlsi_fill_rx(idev->rx_ring);
 
-       do_gettimeofday(&idev->last_rx);        /* first mtt may start from now on */
+       idev->last_rx = ktime_get();    /* first mtt may start from now on */
 
        outw(0, iobase+VLSI_PIO_PROMPT);        /* kick hw state machine */
 
@@ -1520,7 +1500,7 @@ static int vlsi_open(struct net_device *ndev)
        if (!idev->irlap)
                goto errout_free_ring;
 
-       do_gettimeofday(&idev->last_rx);  /* first mtt may start from now on */
+       idev->last_rx = ktime_get();  /* first mtt may start from now on */
 
        idev->new_baud = 9600;          /* start with IrPHY using 9600(SIR) mode */
 
index f9119c6d2a09082bb411b8887ea9f25e84f8137f..f9db2ce4c5c667357ba9c2312c31a609d6e5c459 100644 (file)
@@ -723,7 +723,7 @@ typedef struct vlsi_irda_dev {
        void                    *virtaddr;
        struct vlsi_ring        *tx_ring, *rx_ring;
 
-       struct timeval          last_rx;
+       ktime_t                 last_rx;
 
        spinlock_t              lock;
        struct mutex            mtx;
index 612e0731142d29aef8cca9e882ef66e237fb4960..1df38bdae2ee384d1c6285a699b7dbf31dd28f59 100644 (file)
@@ -1471,11 +1471,17 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
 };
 EXPORT_SYMBOL_GPL(macvlan_link_register);
 
+static struct net *macvlan_get_link_net(const struct net_device *dev)
+{
+       return dev_net(macvlan_dev_real_dev(dev));
+}
+
 static struct rtnl_link_ops macvlan_link_ops = {
        .kind           = "macvlan",
        .setup          = macvlan_setup,
        .newlink        = macvlan_newlink,
        .dellink        = macvlan_dellink,
+       .get_link_net   = macvlan_get_link_net,
 };
 
 static int macvlan_device_event(struct notifier_block *unused,
index 7df221788cd4dc7ae4d8c3ed2f53789296d00deb..e40fdfccc9c10df4ea8676a1dd59275d5d9c6b88 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/fs.h>
 #include <linux/uio.h>
 
-#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev;
 static const struct proto_ops macvtap_socket_ops;
 
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
-                     NETIF_F_TSO6)
+                     NETIF_F_TSO6 | NETIF_F_UFO)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
 
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
                        gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-                       pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
-                                    current->comm);
                        gso_type = SKB_GSO_UDP;
-                       if (skb->protocol == htons(ETH_P_IPV6))
-                               ipv6_proxy_select_ident(skb);
                        break;
                default:
                        return -EINVAL;
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+               else if (sinfo->gso_type & SKB_GSO_UDP)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -645,7 +642,7 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-               if (vlan_tx_tag_present(skb))
+               if (skb_vlan_tag_present(skb))
                        vnet_hdr->csum_start = cpu_to_macvtap16(q,
                                skb_checksum_start_offset(skb) + VLAN_HLEN);
                else
@@ -821,13 +818,13 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
        total = vnet_hdr_len;
        total += skb->len;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                struct {
                        __be16 h_vlan_proto;
                        __be16 h_vlan_TCI;
                } veth;
                veth.h_vlan_proto = skb->vlan_proto;
-               veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+               veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
 
                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
                total += VLAN_HLEN;
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
                        if (arg & TUN_F_TSO6)
                                feature_mask |= NETIF_F_TSO6;
                }
+
+               if (arg & TUN_F_UFO)
+                       feature_mask |= NETIF_F_UFO;
        }
 
        /* tun/tap driver inverts the usage for TSO offloads, where
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
         * When user space turns off TSO, we turn off GSO/LRO so that
         * user-space will not receive TSO frames.
         */
-       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
+       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
                features |= RX_OFFLOADS;
        else
                features &= ~RX_OFFLOADS;
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
-                           TUN_F_TSO_ECN))
+                           TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;
 
                rtnl_lock();
index 4a99c391903790af8ce6b727918455d90415842b..993570b1e2aeb68726c268eea5a8332a2b8159f6 100644 (file)
@@ -302,7 +302,7 @@ void mii_check_link (struct mii_if_info *mii)
 }
 
 /**
- * mii_check_media - check the MII interface for a duplex change
+ * mii_check_media - check the MII interface for a carrier/speed/duplex change
  * @mii: the MII interface
  * @ok_to_print: OK to print link up/down messages
  * @init_media: OK to save duplex mode in @mii
@@ -318,10 +318,6 @@ unsigned int mii_check_media (struct mii_if_info *mii,
        int advertise, lpa, media, duplex;
        int lpa2 = 0;
 
-       /* if forced media, go no further */
-       if (mii->force_media)
-               return 0; /* duplex did not change */
-
        /* check current and old link status */
        old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
        new_carrier = (unsigned int) mii_link_ok(mii);
@@ -345,6 +341,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
         */
        netif_carrier_on(mii->dev);
 
+       if (mii->force_media) {
+               if (ok_to_print)
+                       netdev_info(mii->dev, "link up\n");
+               return 0; /* duplex did not change */
+       }
+
        /* get MII advertise and LPA values */
        if ((!init_media) && (mii->advertising))
                advertise = mii->advertising;
index a3c251b79f38b5dcfa8d48d4c3fbceb1e94ef25f..16adbc481772babbcb6113759fc47ad07e9dda20 100644 (file)
@@ -26,7 +26,7 @@ config AMD_PHY
 
 config AMD_XGBE_PHY
        tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
-       depends on OF && HAS_IOMEM
+       depends on (OF || ACPI) && HAS_IOMEM
        ---help---
          Currently supports the AMD 10GbE PHY
 
index 903dc3dc9ea703dfae71dc4917b432e413735ee4..9e3af54c90102a2c113596d326d893670b7e6c24 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/workqueue.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
@@ -74,6 +75,9 @@
 #include <linux/of_platform.h>
 #include <linux/of_device.h>
 #include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
 
 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
 MODULE_LICENSE("Dual BSD/GPL");
@@ -84,22 +88,43 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define XGBE_PHY_MASK  0xfffffff0
 
 #define XGBE_PHY_SPEEDSET_PROPERTY     "amd,speed-set"
+#define XGBE_PHY_BLWC_PROPERTY         "amd,serdes-blwc"
+#define XGBE_PHY_CDR_RATE_PROPERTY     "amd,serdes-cdr-rate"
+#define XGBE_PHY_PQ_SKEW_PROPERTY      "amd,serdes-pq-skew"
+#define XGBE_PHY_TX_AMP_PROPERTY       "amd,serdes-tx-amp"
+
+#define XGBE_PHY_SPEEDS                        3
+#define XGBE_PHY_SPEED_1000            0
+#define XGBE_PHY_SPEED_2500            1
+#define XGBE_PHY_SPEED_10000           2
 
 #define XGBE_AN_INT_CMPLT              0x01
 #define XGBE_AN_INC_LINK               0x02
 #define XGBE_AN_PG_RCV                 0x04
+#define XGBE_AN_INT_MASK               0x07
 
 #define XNP_MCF_NULL_MESSAGE           0x001
-#define XNP_ACK_PROCESSED              (1 << 12)
-#define XNP_MP_FORMATTED               (1 << 13)
-#define XNP_NP_EXCHANGE                        (1 << 15)
+#define XNP_ACK_PROCESSED              BIT(12)
+#define XNP_MP_FORMATTED               BIT(13)
+#define XNP_NP_EXCHANGE                        BIT(15)
 
 #define XGBE_PHY_RATECHANGE_COUNT      500
 
+#define XGBE_PHY_KR_TRAINING_START     0x01
+#define XGBE_PHY_KR_TRAINING_ENABLE    0x02
+
+#define XGBE_PHY_FEC_ENABLE            0x01
+#define XGBE_PHY_FEC_FORWARD           0x02
+#define XGBE_PHY_FEC_MASK              0x03
+
 #ifndef MDIO_PMA_10GBR_PMD_CTRL
 #define MDIO_PMA_10GBR_PMD_CTRL                0x0096
 #endif
 
+#ifndef MDIO_PMA_10GBR_FEC_ABILITY
+#define MDIO_PMA_10GBR_FEC_ABILITY     0x00aa
+#endif
+
 #ifndef MDIO_PMA_10GBR_FEC_CTRL
 #define MDIO_PMA_10GBR_FEC_CTRL                0x00ab
 #endif
@@ -108,6 +133,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define MDIO_AN_XNP                    0x0016
 #endif
 
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX                    0x0019
+#endif
+
 #ifndef MDIO_AN_INTMASK
 #define MDIO_AN_INTMASK                        0x8001
 #endif
@@ -116,18 +145,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define MDIO_AN_INT                    0x8002
 #endif
 
-#ifndef MDIO_AN_KR_CTRL
-#define MDIO_AN_KR_CTRL                        0x8003
-#endif
-
 #ifndef MDIO_CTRL1_SPEED1G
 #define MDIO_CTRL1_SPEED1G             (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
 #endif
 
-#ifndef MDIO_KR_CTRL_PDETECT
-#define MDIO_KR_CTRL_PDETECT           0x01
-#endif
-
 /* SerDes integration register offsets */
 #define SIR0_KR_RT_1                   0x002c
 #define SIR0_STATUS                    0x0040
@@ -140,10 +161,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SIR0_STATUS_RX_READY_WIDTH     1
 #define SIR0_STATUS_TX_READY_INDEX     8
 #define SIR0_STATUS_TX_READY_WIDTH     1
+#define SIR1_SPEED_CDR_RATE_INDEX      12
+#define SIR1_SPEED_CDR_RATE_WIDTH      4
 #define SIR1_SPEED_DATARATE_INDEX      4
 #define SIR1_SPEED_DATARATE_WIDTH      2
-#define SIR1_SPEED_PI_SPD_SEL_INDEX    12
-#define SIR1_SPEED_PI_SPD_SEL_WIDTH    4
 #define SIR1_SPEED_PLLSEL_INDEX                3
 #define SIR1_SPEED_PLLSEL_WIDTH                1
 #define SIR1_SPEED_RATECHANGE_INDEX    6
@@ -153,20 +174,26 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SIR1_SPEED_WORDMODE_INDEX      0
 #define SIR1_SPEED_WORDMODE_WIDTH      3
 
+#define SPEED_10000_BLWC               0
 #define SPEED_10000_CDR                        0x7
 #define SPEED_10000_PLL                        0x1
+#define SPEED_10000_PQ                 0x1e
 #define SPEED_10000_RATE               0x0
 #define SPEED_10000_TXAMP              0xa
 #define SPEED_10000_WORD               0x7
 
+#define SPEED_2500_BLWC                        1
 #define SPEED_2500_CDR                 0x2
 #define SPEED_2500_PLL                 0x0
+#define SPEED_2500_PQ                  0xa
 #define SPEED_2500_RATE                        0x1
 #define SPEED_2500_TXAMP               0xf
 #define SPEED_2500_WORD                        0x1
 
+#define SPEED_1000_BLWC                        1
 #define SPEED_1000_CDR                 0x2
 #define SPEED_1000_PLL                 0x0
+#define SPEED_1000_PQ                  0xa
 #define SPEED_1000_RATE                        0x3
 #define SPEED_1000_TXAMP               0xf
 #define SPEED_1000_WORD                        0x1
@@ -181,15 +208,6 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define RXTX_REG114_PQ_REG_INDEX       9
 #define RXTX_REG114_PQ_REG_WIDTH       7
 
-#define RXTX_10000_BLWC                        0
-#define RXTX_10000_PQ                  0x1e
-
-#define RXTX_2500_BLWC                 1
-#define RXTX_2500_PQ                   0xa
-
-#define RXTX_1000_BLWC                 1
-#define RXTX_1000_PQ                   0xa
-
 /* Bit setting and getting macros
  *  The get macro will extract the current bit field value from within
  *  the variable
@@ -291,23 +309,44 @@ do {                                                                      \
        XRXTX_IOWRITE((_priv), _reg, reg_val);                          \
 } while (0)
 
+static const u32 amd_xgbe_phy_serdes_blwc[] = {
+       SPEED_1000_BLWC,
+       SPEED_2500_BLWC,
+       SPEED_10000_BLWC,
+};
+
+static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
+       SPEED_1000_CDR,
+       SPEED_2500_CDR,
+       SPEED_10000_CDR,
+};
+
+static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
+       SPEED_1000_PQ,
+       SPEED_2500_PQ,
+       SPEED_10000_PQ,
+};
+
+static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
+       SPEED_1000_TXAMP,
+       SPEED_2500_TXAMP,
+       SPEED_10000_TXAMP,
+};
+
 enum amd_xgbe_phy_an {
        AMD_XGBE_AN_READY = 0,
-       AMD_XGBE_AN_START,
-       AMD_XGBE_AN_EVENT,
        AMD_XGBE_AN_PAGE_RECEIVED,
        AMD_XGBE_AN_INCOMPAT_LINK,
        AMD_XGBE_AN_COMPLETE,
        AMD_XGBE_AN_NO_LINK,
-       AMD_XGBE_AN_EXIT,
        AMD_XGBE_AN_ERROR,
 };
 
 enum amd_xgbe_phy_rx {
-       AMD_XGBE_RX_READY = 0,
-       AMD_XGBE_RX_BPA,
+       AMD_XGBE_RX_BPA = 0,
        AMD_XGBE_RX_XNP,
        AMD_XGBE_RX_COMPLETE,
+       AMD_XGBE_RX_ERROR,
 };
 
 enum amd_xgbe_phy_mode {
@@ -316,12 +355,13 @@ enum amd_xgbe_phy_mode {
 };
 
 enum amd_xgbe_phy_speedset {
-       AMD_XGBE_PHY_SPEEDSET_1000_10000,
+       AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
        AMD_XGBE_PHY_SPEEDSET_2500_10000,
 };
 
 struct amd_xgbe_phy_priv {
        struct platform_device *pdev;
+       struct acpi_device *adev;
        struct device *dev;
 
        struct phy_device *phydev;
@@ -336,10 +376,24 @@ struct amd_xgbe_phy_priv {
        void __iomem *sir0_regs;        /* SerDes integration registers (1/2) */
        void __iomem *sir1_regs;        /* SerDes integration registers (2/2) */
 
-       /* Maintain link status for re-starting auto-negotiation */
-       unsigned int link;
+       int an_irq;
+       char an_irq_name[IFNAMSIZ + 32];
+       struct work_struct an_irq_work;
+       unsigned int an_irq_allocated;
+
        unsigned int speed_set;
 
+       /* SerDes UEFI configurable settings.
+        *   Switching between modes/speeds requires new values for some
+        *   SerDes settings.  The values can be supplied as device
+        *   properties in array format.  The first array entry is for
+        *   1GbE, second for 2.5GbE and third for 10GbE
+        */
+       u32 serdes_blwc[XGBE_PHY_SPEEDS];
+       u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
+       u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
+       u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+
        /* Auto-negotiation state machine support */
        struct mutex an_mutex;
        enum amd_xgbe_phy_an an_result;
@@ -348,7 +402,11 @@ struct amd_xgbe_phy_priv {
        enum amd_xgbe_phy_rx kx_state;
        struct work_struct an_work;
        struct workqueue_struct *an_workqueue;
+       unsigned int an_supported;
        unsigned int parallel_detect;
+       unsigned int fec_ability;
+
+       unsigned int lpm_ctrl;          /* CTRL1 for resume */
 };
 
 static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
@@ -359,7 +417,7 @@ static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
-       ret |= 0x02;
+       ret |= XGBE_PHY_KR_TRAINING_ENABLE;
        phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
 
        return 0;
@@ -373,7 +431,7 @@ static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
-       ret &= ~0x02;
+       ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
        phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
 
        return 0;
@@ -466,12 +524,16 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
 
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
 
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
+       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
+                          priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
+       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
+                          priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
+                          priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
+                          priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -514,12 +576,16 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
 
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
 
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
+       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
+                          priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
+       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
+                          priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
+                          priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
+                          priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -562,12 +628,16 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
 
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
 
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
+       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
+                          priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
+       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
+                          priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
+                          priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
+                          priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -635,6 +705,38 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
        return ret;
 }
 
+static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
+                              bool restart)
+{
+       int ret;
+
+       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+       if (ret < 0)
+               return ret;
+
+       ret &= ~MDIO_AN_CTRL1_ENABLE;
+
+       if (enable)
+               ret |= MDIO_AN_CTRL1_ENABLE;
+
+       if (restart)
+               ret |= MDIO_AN_CTRL1_RESTART;
+
+       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+
+       return 0;
+}
+
+static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
+{
+       return amd_xgbe_phy_set_an(phydev, true, true);
+}
+
+static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
+{
+       return amd_xgbe_phy_set_an(phydev, false, false);
+}
+
 static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
                                                    enum amd_xgbe_phy_rx *state)
 {
@@ -645,7 +747,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
 
        /* If we're not in KR mode then we're done */
        if (!amd_xgbe_phy_in_kr_mode(phydev))
-               return AMD_XGBE_AN_EVENT;
+               return AMD_XGBE_AN_PAGE_RECEIVED;
 
        /* Enable/Disable FEC */
        ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
@@ -660,10 +762,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
        if (ret < 0)
                return AMD_XGBE_AN_ERROR;
 
+       ret &= ~XGBE_PHY_FEC_MASK;
        if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
-               ret |= 0x01;
-       else
-               ret &= ~0x01;
+               ret |= priv->fec_ability;
 
        phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
 
@@ -672,14 +773,17 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
        if (ret < 0)
                return AMD_XGBE_AN_ERROR;
 
-       XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
+       if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
+               XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
 
-       ret |= 0x01;
-       phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+               ret |= XGBE_PHY_KR_TRAINING_START;
+               phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+                             ret);
 
-       XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
+               XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
+       }
 
-       return AMD_XGBE_AN_EVENT;
+       return AMD_XGBE_AN_PAGE_RECEIVED;
 }
 
 static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
@@ -696,7 +800,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
        phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
        phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
 
-       return AMD_XGBE_AN_EVENT;
+       return AMD_XGBE_AN_PAGE_RECEIVED;
 }
 
 static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
@@ -735,11 +839,11 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
        int ad_reg, lp_reg;
 
        /* Check Extended Next Page support */
-       ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+       ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
        if (ad_reg < 0)
                return AMD_XGBE_AN_ERROR;
 
-       lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+       lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
        if (lp_reg < 0)
                return AMD_XGBE_AN_ERROR;
 
@@ -748,226 +852,255 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
               amd_xgbe_an_tx_training(phydev, state);
 }
 
-static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
+static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
+{
+       struct amd_xgbe_phy_priv *priv = phydev->priv;
+       enum amd_xgbe_phy_rx *state;
+       int ret;
+
+       state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
+                                               : &priv->kx_state;
+
+       switch (*state) {
+       case AMD_XGBE_RX_BPA:
+               ret = amd_xgbe_an_rx_bpa(phydev, state);
+               break;
+
+       case AMD_XGBE_RX_XNP:
+               ret = amd_xgbe_an_rx_xnp(phydev, state);
+               break;
+
+       default:
+               ret = AMD_XGBE_AN_ERROR;
+       }
+
+       return ret;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
 {
        struct amd_xgbe_phy_priv *priv = phydev->priv;
        int ret;
 
        /* Be sure we aren't looping trying to negotiate */
        if (amd_xgbe_phy_in_kr_mode(phydev)) {
-               if (priv->kr_state != AMD_XGBE_RX_READY)
+               priv->kr_state = AMD_XGBE_RX_ERROR;
+
+               if (!(phydev->supported & SUPPORTED_1000baseKX_Full) &&
+                   !(phydev->supported & SUPPORTED_2500baseX_Full))
+                       return AMD_XGBE_AN_NO_LINK;
+
+               if (priv->kx_state != AMD_XGBE_RX_BPA)
                        return AMD_XGBE_AN_NO_LINK;
-               priv->kr_state = AMD_XGBE_RX_BPA;
        } else {
-               if (priv->kx_state != AMD_XGBE_RX_READY)
+               priv->kx_state = AMD_XGBE_RX_ERROR;
+
+               if (!(phydev->supported & SUPPORTED_10000baseKR_Full))
+                       return AMD_XGBE_AN_NO_LINK;
+
+               if (priv->kr_state != AMD_XGBE_RX_BPA)
                        return AMD_XGBE_AN_NO_LINK;
-               priv->kx_state = AMD_XGBE_RX_BPA;
        }
 
-       /* Set up Advertisement register 3 first */
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
-       if (ret < 0)
+       ret = amd_xgbe_phy_disable_an(phydev);
+       if (ret)
                return AMD_XGBE_AN_ERROR;
 
-       if (phydev->supported & SUPPORTED_10000baseR_FEC)
-               ret |= 0xc000;
-       else
-               ret &= ~0xc000;
-
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
-
-       /* Set up Advertisement register 2 next */
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
-       if (ret < 0)
+       ret = amd_xgbe_phy_switch_mode(phydev);
+       if (ret)
                return AMD_XGBE_AN_ERROR;
 
-       if (phydev->supported & SUPPORTED_10000baseKR_Full)
-               ret |= 0x80;
-       else
-               ret &= ~0x80;
-
-       if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
-           (phydev->supported & SUPPORTED_2500baseX_Full))
-               ret |= 0x20;
-       else
-               ret &= ~0x20;
-
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
-
-       /* Set up Advertisement register 1 last */
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
-       if (ret < 0)
+       ret = amd_xgbe_phy_restart_an(phydev);
+       if (ret)
                return AMD_XGBE_AN_ERROR;
 
-       if (phydev->supported & SUPPORTED_Pause)
-               ret |= 0x400;
-       else
-               ret &= ~0x400;
+       return AMD_XGBE_AN_INCOMPAT_LINK;
+}
 
-       if (phydev->supported & SUPPORTED_Asym_Pause)
-               ret |= 0x800;
-       else
-               ret &= ~0x800;
+static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
+{
+       struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
 
-       /* We don't intend to perform XNP */
-       ret &= ~XNP_NP_EXCHANGE;
+       /* Interrupt reason must be read and cleared outside of IRQ context */
+       disable_irq_nosync(priv->an_irq);
 
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
+       queue_work(priv->an_workqueue, &priv->an_irq_work);
 
-       /* Enable and start auto-negotiation */
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+       return IRQ_HANDLED;
+}
 
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL);
-       if (ret < 0)
-               return AMD_XGBE_AN_ERROR;
+static void amd_xgbe_an_irq_work(struct work_struct *work)
+{
+       struct amd_xgbe_phy_priv *priv = container_of(work,
+                                                     struct amd_xgbe_phy_priv,
+                                                     an_irq_work);
 
-       ret |= MDIO_KR_CTRL_PDETECT;
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret);
+       /* Avoid a race between enabling the IRQ and exiting the work by
+        * waiting for the work to finish and then queueing it
+        */
+       flush_work(&priv->an_work);
+       queue_work(priv->an_workqueue, &priv->an_work);
+}
 
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
-       if (ret < 0)
-               return AMD_XGBE_AN_ERROR;
+static void amd_xgbe_an_state_machine(struct work_struct *work)
+{
+       struct amd_xgbe_phy_priv *priv = container_of(work,
+                                                     struct amd_xgbe_phy_priv,
+                                                     an_work);
+       struct phy_device *phydev = priv->phydev;
+       enum amd_xgbe_phy_an cur_state = priv->an_state;
+       int int_reg, int_mask;
 
-       ret |= MDIO_AN_CTRL1_ENABLE;
-       ret |= MDIO_AN_CTRL1_RESTART;
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+       mutex_lock(&priv->an_mutex);
 
-       return AMD_XGBE_AN_EVENT;
-}
+       /* Read the interrupt */
+       int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
+       if (!int_reg)
+               goto out;
 
-static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
-{
-       enum amd_xgbe_phy_an new_state;
-       int ret;
+next_int:
+       if (int_reg < 0) {
+               priv->an_state = AMD_XGBE_AN_ERROR;
+               int_mask = XGBE_AN_INT_MASK;
+       } else if (int_reg & XGBE_AN_PG_RCV) {
+               priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
+               int_mask = XGBE_AN_PG_RCV;
+       } else if (int_reg & XGBE_AN_INC_LINK) {
+               priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
+               int_mask = XGBE_AN_INC_LINK;
+       } else if (int_reg & XGBE_AN_INT_CMPLT) {
+               priv->an_state = AMD_XGBE_AN_COMPLETE;
+               int_mask = XGBE_AN_INT_CMPLT;
+       } else {
+               priv->an_state = AMD_XGBE_AN_ERROR;
+               int_mask = 0;
+       }
 
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
-       if (ret < 0)
-               return AMD_XGBE_AN_ERROR;
+       /* Clear the interrupt to be processed */
+       int_reg &= ~int_mask;
+       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
 
-       new_state = AMD_XGBE_AN_EVENT;
-       if (ret & XGBE_AN_PG_RCV)
-               new_state = AMD_XGBE_AN_PAGE_RECEIVED;
-       else if (ret & XGBE_AN_INC_LINK)
-               new_state = AMD_XGBE_AN_INCOMPAT_LINK;
-       else if (ret & XGBE_AN_INT_CMPLT)
-               new_state = AMD_XGBE_AN_COMPLETE;
+       priv->an_result = priv->an_state;
 
-       if (new_state != AMD_XGBE_AN_EVENT)
-               phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+again:
+       cur_state = priv->an_state;
 
-       return new_state;
-}
+       switch (priv->an_state) {
+       case AMD_XGBE_AN_READY:
+               priv->an_supported = 0;
+               break;
 
-static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       enum amd_xgbe_phy_rx *state;
-       int ret;
+       case AMD_XGBE_AN_PAGE_RECEIVED:
+               priv->an_state = amd_xgbe_an_page_received(phydev);
+               priv->an_supported++;
+               break;
 
-       state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
-                                               : &priv->kx_state;
+       case AMD_XGBE_AN_INCOMPAT_LINK:
+               priv->an_supported = 0;
+               priv->parallel_detect = 0;
+               priv->an_state = amd_xgbe_an_incompat_link(phydev);
+               break;
 
-       switch (*state) {
-       case AMD_XGBE_RX_BPA:
-               ret = amd_xgbe_an_rx_bpa(phydev, state);
+       case AMD_XGBE_AN_COMPLETE:
+               priv->parallel_detect = priv->an_supported ? 0 : 1;
+               netdev_dbg(phydev->attached_dev, "%s successful\n",
+                          priv->an_supported ? "Auto negotiation"
+                                             : "Parallel detection");
                break;
 
-       case AMD_XGBE_RX_XNP:
-               ret = amd_xgbe_an_rx_xnp(phydev, state);
+       case AMD_XGBE_AN_NO_LINK:
                break;
 
        default:
-               ret = AMD_XGBE_AN_ERROR;
+               priv->an_state = AMD_XGBE_AN_ERROR;
        }
 
-       return ret;
-}
+       if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
+               int_reg = 0;
+               phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+       } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
+               netdev_err(phydev->attached_dev,
+                          "error during auto-negotiation, state=%u\n",
+                          cur_state);
 
-static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
-{
-       int ret;
+               int_reg = 0;
+               phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+       }
 
-       ret = amd_xgbe_phy_switch_mode(phydev);
-       if (ret)
-               return AMD_XGBE_AN_ERROR;
+       if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
+               priv->an_result = priv->an_state;
+               priv->an_state = AMD_XGBE_AN_READY;
+               priv->kr_state = AMD_XGBE_RX_BPA;
+               priv->kx_state = AMD_XGBE_RX_BPA;
+       }
 
-       return AMD_XGBE_AN_START;
-}
+       if (cur_state != priv->an_state)
+               goto again;
 
-static void amd_xgbe_an_state_machine(struct work_struct *work)
-{
-       struct amd_xgbe_phy_priv *priv = container_of(work,
-                                                     struct amd_xgbe_phy_priv,
-                                                     an_work);
-       struct phy_device *phydev = priv->phydev;
-       enum amd_xgbe_phy_an cur_state;
-       int sleep;
-       unsigned int an_supported = 0;
+       if (int_reg)
+               goto next_int;
 
-       /* Start in KX mode */
-       if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX))
-               priv->an_state = AMD_XGBE_AN_ERROR;
+out:
+       enable_irq(priv->an_irq);
 
-       while (1) {
-               mutex_lock(&priv->an_mutex);
+       mutex_unlock(&priv->an_mutex);
+}
 
-               cur_state = priv->an_state;
+static int amd_xgbe_an_init(struct phy_device *phydev)
+{
+       int ret;
 
-               switch (priv->an_state) {
-               case AMD_XGBE_AN_START:
-                       an_supported = 0;
-                       priv->parallel_detect = 0;
-                       priv->an_state = amd_xgbe_an_start(phydev);
-                       break;
+       /* Set up Advertisement register 3 first */
+       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+       if (ret < 0)
+               return ret;
 
-               case AMD_XGBE_AN_EVENT:
-                       priv->an_state = amd_xgbe_an_event(phydev);
-                       break;
+       if (phydev->supported & SUPPORTED_10000baseR_FEC)
+               ret |= 0xc000;
+       else
+               ret &= ~0xc000;
 
-               case AMD_XGBE_AN_PAGE_RECEIVED:
-                       priv->an_state = amd_xgbe_an_page_received(phydev);
-                       an_supported++;
-                       break;
+       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
 
-               case AMD_XGBE_AN_INCOMPAT_LINK:
-                       priv->an_state = amd_xgbe_an_incompat_link(phydev);
-                       break;
+       /* Set up Advertisement register 2 next */
+       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+       if (ret < 0)
+               return ret;
 
-               case AMD_XGBE_AN_COMPLETE:
-                       priv->parallel_detect = an_supported ? 0 : 1;
-                       netdev_info(phydev->attached_dev, "%s successful\n",
-                                   an_supported ? "Auto negotiation"
-                                                : "Parallel detection");
-                       /* fall through */
+       if (phydev->supported & SUPPORTED_10000baseKR_Full)
+               ret |= 0x80;
+       else
+               ret &= ~0x80;
 
-               case AMD_XGBE_AN_NO_LINK:
-               case AMD_XGBE_AN_EXIT:
-                       goto exit_unlock;
+       if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
+           (phydev->supported & SUPPORTED_2500baseX_Full))
+               ret |= 0x20;
+       else
+               ret &= ~0x20;
 
-               default:
-                       priv->an_state = AMD_XGBE_AN_ERROR;
-               }
+       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
 
-               if (priv->an_state == AMD_XGBE_AN_ERROR) {
-                       netdev_err(phydev->attached_dev,
-                                  "error during auto-negotiation, state=%u\n",
-                                  cur_state);
-                       goto exit_unlock;
-               }
+       /* Set up Advertisement register 1 last */
+       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+       if (ret < 0)
+               return ret;
 
-               sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
+       if (phydev->supported & SUPPORTED_Pause)
+               ret |= 0x400;
+       else
+               ret &= ~0x400;
 
-               mutex_unlock(&priv->an_mutex);
+       if (phydev->supported & SUPPORTED_Asym_Pause)
+               ret |= 0x800;
+       else
+               ret &= ~0x800;
 
-               if (sleep)
-                       usleep_range(20, 50);
-       }
+       /* We don't intend to perform XNP */
+       ret &= ~XNP_NP_EXCHANGE;
 
-exit_unlock:
-       priv->an_result = priv->an_state;
-       priv->an_state = AMD_XGBE_AN_READY;
+       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
 
-       mutex_unlock(&priv->an_mutex);
+       return 0;
 }
 
 static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
@@ -992,20 +1125,57 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
        if (ret & MDIO_CTRL1_RESET)
                return -ETIMEDOUT;
 
-       /* Make sure the XPCS and SerDes are in compatible states */
-       return amd_xgbe_phy_xgmii_mode(phydev);
+       /* Disable auto-negotiation for now */
+       ret = amd_xgbe_phy_disable_an(phydev);
+       if (ret < 0)
+               return ret;
+
+       /* Clear auto-negotiation interrupts */
+       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+       return 0;
 }
 
 static int amd_xgbe_phy_config_init(struct phy_device *phydev)
 {
        struct amd_xgbe_phy_priv *priv = phydev->priv;
+       struct net_device *netdev = phydev->attached_dev;
+       int ret;
+
+       if (!priv->an_irq_allocated) {
+               /* Allocate the auto-negotiation workqueue and interrupt */
+               snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
+                        "%s-pcs", netdev_name(netdev));
+
+               priv->an_workqueue =
+                       create_singlethread_workqueue(priv->an_irq_name);
+               if (!priv->an_workqueue) {
+                       netdev_err(netdev, "phy workqueue creation failed\n");
+                       return -ENOMEM;
+               }
+
+               ret = devm_request_irq(priv->dev, priv->an_irq,
+                                      amd_xgbe_an_isr, 0, priv->an_irq_name,
+                                      priv);
+               if (ret) {
+                       netdev_err(netdev, "phy irq request failed\n");
+                       destroy_workqueue(priv->an_workqueue);
+                       return ret;
+               }
+
+               priv->an_irq_allocated = 1;
+       }
+
+       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
+       if (ret < 0)
+               return ret;
+       priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
 
        /* Initialize supported features */
        phydev->supported = SUPPORTED_Autoneg;
        phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
        phydev->supported |= SUPPORTED_Backplane;
-       phydev->supported |= SUPPORTED_10000baseKR_Full |
-                            SUPPORTED_10000baseR_FEC;
+       phydev->supported |= SUPPORTED_10000baseKR_Full;
        switch (priv->speed_set) {
        case AMD_XGBE_PHY_SPEEDSET_1000_10000:
                phydev->supported |= SUPPORTED_1000baseKX_Full;
@@ -1014,11 +1184,33 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
                phydev->supported |= SUPPORTED_2500baseX_Full;
                break;
        }
+
+       if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
+               phydev->supported |= SUPPORTED_10000baseR_FEC;
+
        phydev->advertising = phydev->supported;
 
-       /* Turn off and clear interrupts */
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+       /* Set initial mode - call the mode setting routines
+        * directly to insure we are properly configured
+        */
+       if (phydev->supported & SUPPORTED_10000baseKR_Full)
+               ret = amd_xgbe_phy_xgmii_mode(phydev);
+       else if (phydev->supported & SUPPORTED_1000baseKX_Full)
+               ret = amd_xgbe_phy_gmii_mode(phydev);
+       else if (phydev->supported & SUPPORTED_2500baseX_Full)
+               ret = amd_xgbe_phy_gmii_2500_mode(phydev);
+       else
+               ret = -EINVAL;
+       if (ret < 0)
+               return ret;
+
+       /* Set up advertisement registers based on current settings */
+       ret = amd_xgbe_an_init(phydev);
+       if (ret)
+               return ret;
+
+       /* Enable auto-negotiation interrupts */
+       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
 
        return 0;
 }
@@ -1028,25 +1220,19 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
        int ret;
 
        /* Disable auto-negotiation */
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+       ret = amd_xgbe_phy_disable_an(phydev);
        if (ret < 0)
                return ret;
 
-       ret &= ~MDIO_AN_CTRL1_ENABLE;
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
-
        /* Validate/Set specified speed */
        switch (phydev->speed) {
        case SPEED_10000:
-               ret = amd_xgbe_phy_xgmii_mode(phydev);
+               ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
                break;
 
        case SPEED_2500:
-               ret = amd_xgbe_phy_gmii_2500_mode(phydev);
-               break;
-
        case SPEED_1000:
-               ret = amd_xgbe_phy_gmii_mode(phydev);
+               ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
                break;
 
        default:
@@ -1066,10 +1252,11 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
        return 0;
 }
 
-static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
+static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
 {
        struct amd_xgbe_phy_priv *priv = phydev->priv;
        u32 mmd_mask = phydev->c45_ids.devices_in_package;
+       int ret;
 
        if (phydev->autoneg != AUTONEG_ENABLE)
                return amd_xgbe_phy_setup_forced(phydev);
@@ -1078,56 +1265,79 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
        if (!(mmd_mask & MDIO_DEVS_AN))
                return -EINVAL;
 
-       /* Start/Restart the auto-negotiation state machine */
-       mutex_lock(&priv->an_mutex);
+       /* Disable auto-negotiation interrupt */
+       disable_irq(priv->an_irq);
+
+       /* Start auto-negotiation in a supported mode */
+       if (phydev->supported & SUPPORTED_10000baseKR_Full)
+               ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
+       else if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
+                (phydev->supported & SUPPORTED_2500baseX_Full))
+               ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
+       else
+               ret = -EINVAL;
+       if (ret < 0) {
+               enable_irq(priv->an_irq);
+               return ret;
+       }
+
+       /* Disable and stop any in progress auto-negotiation */
+       ret = amd_xgbe_phy_disable_an(phydev);
+       if (ret < 0)
+               return ret;
+
+       /* Clear any auto-negotitation interrupts */
+       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
        priv->an_result = AMD_XGBE_AN_READY;
-       priv->an_state = AMD_XGBE_AN_START;
-       priv->kr_state = AMD_XGBE_RX_READY;
-       priv->kx_state = AMD_XGBE_RX_READY;
-       mutex_unlock(&priv->an_mutex);
+       priv->an_state = AMD_XGBE_AN_READY;
+       priv->kr_state = AMD_XGBE_RX_BPA;
+       priv->kx_state = AMD_XGBE_RX_BPA;
 
-       queue_work(priv->an_workqueue, &priv->an_work);
+       /* Re-enable auto-negotiation interrupt */
+       enable_irq(priv->an_irq);
 
-       return 0;
+       /* Set up advertisement registers based on current settings */
+       ret = amd_xgbe_an_init(phydev);
+       if (ret)
+               return ret;
+
+       /* Enable and start auto-negotiation */
+       return amd_xgbe_phy_restart_an(phydev);
 }
 
-static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
+static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
 {
        struct amd_xgbe_phy_priv *priv = phydev->priv;
-       enum amd_xgbe_phy_an state;
+       int ret;
 
        mutex_lock(&priv->an_mutex);
-       state = priv->an_result;
+
+       ret = __amd_xgbe_phy_config_aneg(phydev);
+
        mutex_unlock(&priv->an_mutex);
 
-       return (state == AMD_XGBE_AN_COMPLETE);
+       return ret;
+}
+
+static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
+{
+       struct amd_xgbe_phy_priv *priv = phydev->priv;
+
+       return (priv->an_result == AMD_XGBE_AN_COMPLETE);
 }
 
 static int amd_xgbe_phy_update_link(struct phy_device *phydev)
 {
        struct amd_xgbe_phy_priv *priv = phydev->priv;
-       enum amd_xgbe_phy_an state;
-       unsigned int check_again, autoneg;
        int ret;
 
        /* If we're doing auto-negotiation don't report link down */
-       mutex_lock(&priv->an_mutex);
-       state = priv->an_state;
-       mutex_unlock(&priv->an_mutex);
-
-       if (state != AMD_XGBE_AN_READY) {
+       if (priv->an_state != AMD_XGBE_AN_READY) {
                phydev->link = 1;
                return 0;
        }
 
-       /* Since the device can be in the wrong mode when a link is
-        * (re-)established (cable connected after the interface is
-        * up, etc.), the link status may report no link. If there
-        * is no link, try switching modes and checking the status
-        * again if auto negotiation is enabled.
-        */
-       check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
-again:
        /* Link status is latched low, so read once to clear
         * and then read again to get current state
         */
@@ -1141,25 +1351,6 @@ again:
 
        phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
 
-       if (!phydev->link) {
-               if (check_again) {
-                       ret = amd_xgbe_phy_switch_mode(phydev);
-                       if (ret < 0)
-                               return ret;
-                       check_again = 0;
-                       goto again;
-               }
-       }
-
-       autoneg = (phydev->link && !priv->link) ? 1 : 0;
-       priv->link = phydev->link;
-       if (autoneg) {
-               /* Link is (back) up, re-start auto-negotiation */
-               ret = amd_xgbe_phy_config_aneg(phydev);
-               if (ret < 0)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -1249,6 +1440,7 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
 
 static int amd_xgbe_phy_suspend(struct phy_device *phydev)
 {
+       struct amd_xgbe_phy_priv *priv = phydev->priv;
        int ret;
 
        mutex_lock(&phydev->lock);
@@ -1257,6 +1449,8 @@ static int amd_xgbe_phy_suspend(struct phy_device *phydev)
        if (ret < 0)
                goto unlock;
 
+       priv->lpm_ctrl = ret;
+
        ret |= MDIO_CTRL1_LPOWER;
        phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
 
@@ -1270,69 +1464,106 @@ unlock:
 
 static int amd_xgbe_phy_resume(struct phy_device *phydev)
 {
-       int ret;
+       struct amd_xgbe_phy_priv *priv = phydev->priv;
 
        mutex_lock(&phydev->lock);
 
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-       if (ret < 0)
-               goto unlock;
+       priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
 
-       ret &= ~MDIO_CTRL1_LPOWER;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+       mutex_unlock(&phydev->lock);
 
-       ret = 0;
+       return 0;
+}
 
-unlock:
-       mutex_unlock(&phydev->lock);
+static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
+                                               unsigned int type)
+{
+       unsigned int count;
+       int i;
 
-       return ret;
+       for (i = 0, count = 0; i < pdev->num_resources; i++) {
+               struct resource *r = &pdev->resource[i];
+
+               if (type == resource_type(r))
+                       count++;
+       }
+
+       return count;
 }
 
 static int amd_xgbe_phy_probe(struct phy_device *phydev)
 {
        struct amd_xgbe_phy_priv *priv;
-       struct platform_device *pdev;
-       struct device *dev;
-       char *wq_name;
-       const __be32 *property;
-       unsigned int speed_set;
+       struct platform_device *phy_pdev;
+       struct device *dev, *phy_dev;
+       unsigned int phy_resnum, phy_irqnum;
        int ret;
 
-       if (!phydev->dev.of_node)
+       if (!phydev->bus || !phydev->bus->parent)
                return -EINVAL;
 
-       pdev = of_find_device_by_node(phydev->dev.of_node);
-       if (!pdev)
-               return -EINVAL;
-       dev = &pdev->dev;
-
-       wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
-       if (!wq_name) {
-               ret = -ENOMEM;
-               goto err_pdev;
-       }
+       dev = phydev->bus->parent;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv) {
-               ret = -ENOMEM;
-               goto err_name;
-       }
+       if (!priv)
+               return -ENOMEM;
 
-       priv->pdev = pdev;
+       priv->pdev = to_platform_device(dev);
+       priv->adev = ACPI_COMPANION(dev);
        priv->dev = dev;
        priv->phydev = phydev;
+       mutex_init(&priv->an_mutex);
+       INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
+       INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
+
+       if (!priv->adev || acpi_disabled) {
+               struct device_node *bus_node;
+               struct device_node *phy_node;
+
+               bus_node = priv->dev->of_node;
+               phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
+               if (!phy_node) {
+                       dev_err(dev, "unable to parse phy-handle\n");
+                       ret = -EINVAL;
+                       goto err_priv;
+               }
+
+               phy_pdev = of_find_device_by_node(phy_node);
+               of_node_put(phy_node);
+
+               if (!phy_pdev) {
+                       dev_err(dev, "unable to obtain phy device\n");
+                       ret = -EINVAL;
+                       goto err_priv;
+               }
+
+               phy_resnum = 0;
+               phy_irqnum = 0;
+       } else {
+               /* In ACPI, the XGBE and PHY resources are the grouped
+                * together with the PHY resources at the end
+                */
+               phy_pdev = priv->pdev;
+               phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
+                                                        IORESOURCE_MEM) - 3;
+               phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
+                                                        IORESOURCE_IRQ) - 1;
+       }
+       phy_dev = &phy_pdev->dev;
 
        /* Get the device mmio areas */
-       priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+                                              phy_resnum++);
        priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
        if (IS_ERR(priv->rxtx_regs)) {
                dev_err(dev, "rxtx ioremap failed\n");
                ret = PTR_ERR(priv->rxtx_regs);
-               goto err_priv;
+               goto err_put;
        }
 
-       priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+                                              phy_resnum++);
        priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
        if (IS_ERR(priv->sir0_regs)) {
                dev_err(dev, "sir0 ioremap failed\n");
@@ -1340,7 +1571,8 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
                goto err_rxtx;
        }
 
-       priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+       priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+                                              phy_resnum++);
        priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
        if (IS_ERR(priv->sir1_regs)) {
                dev_err(dev, "sir1 ioremap failed\n");
@@ -1348,40 +1580,98 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
                goto err_sir0;
        }
 
+       /* Get the auto-negotiation interrupt */
+       ret = platform_get_irq(phy_pdev, phy_irqnum);
+       if (ret < 0) {
+               dev_err(dev, "platform_get_irq failed\n");
+               goto err_sir1;
+       }
+       priv->an_irq = ret;
+
        /* Get the device speed set property */
-       speed_set = 0;
-       property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
-                                  NULL);
-       if (property)
-               speed_set = be32_to_cpu(*property);
-
-       switch (speed_set) {
-       case 0:
-               priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
-               break;
-       case 1:
-               priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
+       ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
+                                      &priv->speed_set);
+       if (ret) {
+               dev_err(dev, "invalid %s property\n",
+                       XGBE_PHY_SPEEDSET_PROPERTY);
+               goto err_sir1;
+       }
+
+       switch (priv->speed_set) {
+       case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+       case AMD_XGBE_PHY_SPEEDSET_2500_10000:
                break;
        default:
-               dev_err(dev, "invalid amd,speed-set property\n");
+               dev_err(dev, "invalid %s property\n",
+                       XGBE_PHY_SPEEDSET_PROPERTY);
                ret = -EINVAL;
                goto err_sir1;
        }
 
-       priv->link = 1;
+       if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_BLWC_PROPERTY,
+                                                    priv->serdes_blwc,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_BLWC_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
+                      sizeof(priv->serdes_blwc));
+       }
 
-       mutex_init(&priv->an_mutex);
-       INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
-       priv->an_workqueue = create_singlethread_workqueue(wq_name);
-       if (!priv->an_workqueue) {
-               ret = -ENOMEM;
-               goto err_sir1;
+       if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_CDR_RATE_PROPERTY,
+                                                    priv->serdes_cdr_rate,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_CDR_RATE_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
+                      sizeof(priv->serdes_cdr_rate));
+       }
+
+       if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_PQ_SKEW_PROPERTY,
+                                                    priv->serdes_pq_skew,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_PQ_SKEW_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
+                      sizeof(priv->serdes_pq_skew));
+       }
+
+       if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_TX_AMP_PROPERTY,
+                                                    priv->serdes_tx_amp,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_TX_AMP_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
+                      sizeof(priv->serdes_tx_amp));
        }
 
        phydev->priv = priv;
 
-       kfree(wq_name);
-       of_dev_put(pdev);
+       if (!priv->adev || acpi_disabled)
+               platform_device_put(phy_pdev);
 
        return 0;
 
@@ -1400,15 +1690,13 @@ err_rxtx:
        devm_release_mem_region(dev, priv->rxtx_res->start,
                                resource_size(priv->rxtx_res));
 
+err_put:
+       if (!priv->adev || acpi_disabled)
+               platform_device_put(phy_pdev);
+
 err_priv:
        devm_kfree(dev, priv);
 
-err_name:
-       kfree(wq_name);
-
-err_pdev:
-       of_dev_put(pdev);
-
        return ret;
 }
 
@@ -1417,13 +1705,12 @@ static void amd_xgbe_phy_remove(struct phy_device *phydev)
        struct amd_xgbe_phy_priv *priv = phydev->priv;
        struct device *dev = priv->dev;
 
-       /* Stop any in process auto-negotiation */
-       mutex_lock(&priv->an_mutex);
-       priv->an_state = AMD_XGBE_AN_EXIT;
-       mutex_unlock(&priv->an_mutex);
+       if (priv->an_irq_allocated) {
+               devm_free_irq(dev, priv->an_irq, priv);
 
-       flush_workqueue(priv->an_workqueue);
-       destroy_workqueue(priv->an_workqueue);
+               flush_workqueue(priv->an_workqueue);
+               destroy_workqueue(priv->an_workqueue);
+       }
 
        /* Release resources */
        devm_iounmap(dev, priv->sir1_regs);
index 3ad0e6e16c395e61c0f31bd9ab6b30b3fb6adf4e..a08a3c78ba97b08fb503bace451a3d177fc04053 100644 (file)
@@ -168,7 +168,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
        struct fixed_mdio_bus *fmb = &platform_fmb;
        struct fixed_phy *fp;
 
-       if (!link_update || !phydev || !phydev->bus)
+       if (!phydev || !phydev->bus)
                return -EINVAL;
 
        list_for_each_entry(fp, &fmb->phys, node) {
index 50051f271b10e8c02f34fae073f8d48ee15b8a97..095ef3fe369af5ebe08254384abc38176df1aef1 100644 (file)
@@ -443,9 +443,13 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
        if (!drv || !phydrv->suspend)
                return false;
 
-       /* PHY not attached? May suspend. */
+       /* PHY not attached? May suspend if the PHY has not already been
+        * suspended as part of a prior call to phy_disconnect() ->
+        * phy_detach() -> phy_suspend() because the parent netdev might be the
+        * MDIO bus driver and clock gated at this point.
+        */
        if (!netdev)
-               return true;
+               return !phydev->suspended;
 
        /* Don't suspend PHY if the attched netdev parent may wakeup.
         * The parent may point to a PCI device, as in tg3 driver.
@@ -465,7 +469,6 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
 
 static int mdio_bus_suspend(struct device *dev)
 {
-       struct phy_driver *phydrv = to_phy_driver(dev->driver);
        struct phy_device *phydev = to_phy_device(dev);
 
        /* We must stop the state machine manually, otherwise it stops out of
@@ -479,19 +482,18 @@ static int mdio_bus_suspend(struct device *dev)
        if (!mdio_bus_phy_may_suspend(phydev))
                return 0;
 
-       return phydrv->suspend(phydev);
+       return phy_suspend(phydev);
 }
 
 static int mdio_bus_resume(struct device *dev)
 {
-       struct phy_driver *phydrv = to_phy_driver(dev->driver);
        struct phy_device *phydev = to_phy_device(dev);
        int ret;
 
        if (!mdio_bus_phy_may_suspend(phydev))
                goto no_resume;
 
-       ret = phydrv->resume(phydev);
+       ret = phy_resume(phydev);
        if (ret < 0)
                return ret;
 
index 767cd110f49688d2b4118ba4dffc1a373972e56d..cdcac6aa4260b32927d7c903e024b42e5d17861e 100644 (file)
@@ -439,6 +439,9 @@ int phy_start_aneg(struct phy_device *phydev)
        if (AUTONEG_DISABLE == phydev->autoneg)
                phy_sanitize_settings(phydev);
 
+       /* Invalidate LP advertising flags */
+       phydev->lp_advertising = 0;
+
        err = phydev->drv->config_aneg(phydev);
        if (err < 0)
                goto out_unlock;
index 3fc91e89f5a564bb36ab251c81ec083fc7cab68d..bdfe51fc3a6507154edfcaf8be3413884bcf702f 100644 (file)
@@ -699,6 +699,7 @@ int phy_suspend(struct phy_device *phydev)
 {
        struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+       int ret = 0;
 
        /* If the device has WOL enabled, we cannot suspend the PHY */
        phy_ethtool_get_wol(phydev, &wol);
@@ -706,18 +707,31 @@ int phy_suspend(struct phy_device *phydev)
                return -EBUSY;
 
        if (phydrv->suspend)
-               return phydrv->suspend(phydev);
-       return 0;
+               ret = phydrv->suspend(phydev);
+
+       if (ret)
+               return ret;
+
+       phydev->suspended = true;
+
+       return ret;
 }
 EXPORT_SYMBOL(phy_suspend);
 
 int phy_resume(struct phy_device *phydev)
 {
        struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+       int ret = 0;
 
        if (phydrv->resume)
-               return phydrv->resume(phydev);
-       return 0;
+               ret = phydrv->resume(phydev);
+
+       if (ret)
+               return ret;
+
+       phydev->suspended = false;
+
+       return ret;
 }
 EXPORT_SYMBOL(phy_resume);
 
index 602c625d95d5e26ba0c79f9ae6af2dd92f1f3c93..b5edc7f96a392d0080400ed4285cfb84e86d9e5c 100644 (file)
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
        /*
         * See if we managed to reduce the size of the packet.
         */
-       if (olen < isize) {
+       if (olen < isize && olen <= osize) {
                state->stats.comp_bytes += olen;
                state->stats.comp_packets++;
        } else {
index 93e224217e24b36b089102be11ada5921f62d83b..0e62274e884a89de170d668795b40c3620d41046 100644 (file)
@@ -28,6 +28,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sch_generic.h>
+#include <net/switchdev.h>
 #include <generated/utsrelease.h>
 #include <linux/if_team.h>
 
@@ -176,7 +177,6 @@ static int __team_option_inst_add(struct team *team, struct team_option *option,
 static int __team_option_inst_add_option(struct team *team,
                                         struct team_option *option)
 {
-       struct team_port *port;
        int err;
 
        if (!option->per_port) {
@@ -184,12 +184,6 @@ static int __team_option_inst_add_option(struct team *team,
                if (err)
                        goto inst_del_option;
        }
-
-       list_for_each_entry(port, &team->port_list, list) {
-               err = __team_option_inst_add(team, option, port);
-               if (err)
-                       goto inst_del_option;
-       }
        return 0;
 
 inst_del_option:
@@ -629,6 +623,7 @@ static int team_change_mode(struct team *team, const char *kind)
 static void team_notify_peers_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, notify_peers.dw.work);
 
@@ -636,9 +631,14 @@ static void team_notify_peers_work(struct work_struct *work)
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+       if (val)
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
 }
@@ -669,6 +669,7 @@ static void team_notify_peers_fini(struct team *team)
 static void team_mcast_rejoin_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, mcast_rejoin.dw.work);
 
@@ -676,9 +677,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+       if (val)
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
 }
@@ -1920,7 +1926,7 @@ static netdev_features_t team_fix_features(struct net_device *dev,
        struct team *team = netdev_priv(dev);
        netdev_features_t mask;
 
-       mask = features;
+       mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
 
@@ -1970,6 +1976,8 @@ static const struct net_device_ops team_netdev_ops = {
        .ndo_del_slave          = team_del_slave,
        .ndo_fix_features       = team_fix_features,
        .ndo_change_carrier     = team_change_carrier,
+       .ndo_bridge_setlink     = ndo_dflt_netdev_switch_port_bridge_setlink,
+       .ndo_bridge_dellink     = ndo_dflt_netdev_switch_port_bridge_dellink,
 };
 
 /***********************
index c0df872f5b8c53818ca7b7c130f1fcb0fb972e89..857dca47bf80eb9127e9e11d70c3cd681d114aae 100644 (file)
@@ -65,7 +65,6 @@
 #include <linux/nsproxy.h>
 #include <linux/virtio_net.h>
 #include <linux/rcupdate.h>
-#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
@@ -124,10 +123,9 @@ struct tap_filter {
        unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
- * the netdevice to be fit in one page. So we can make sure the success of
- * memory allocation. TODO: increase the limit. */
-#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
+ * to max number of VCPUs in guest. */
+#define MAX_TAP_QUEUES 256
 #define MAX_TAP_FLOWS  4096
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
@@ -187,7 +185,7 @@ struct tun_struct {
        struct net_device       *dev;
        netdev_features_t       set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
-                         NETIF_F_TSO6)
+                         NETIF_F_TSO6|NETIF_F_UFO)
 
        int                     vnet_hdr_sz;
        int                     sndbuf;
@@ -258,7 +256,6 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 {
        tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
                  e->rxhash, e->queue_index);
-       sock_rps_reset_flow_hash(e->rps_rxhash);
        hlist_del_rcu(&e->hash_link);
        kfree_rcu(e, rcu);
        --tun->flow_count;
@@ -375,10 +372,8 @@ unlock:
  */
 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
 {
-       if (unlikely(e->rps_rxhash != hash)) {
-               sock_rps_reset_flow_hash(e->rps_rxhash);
+       if (unlikely(e->rps_rxhash != hash))
                e->rps_rxhash = hash;
-       }
 }
 
 /* We try to identify a flow through its rxhash first. The reason that
@@ -1167,8 +1162,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                break;
        }
 
-       skb_reset_network_header(skb);
-
        if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1179,20 +1172,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-               {
-                       static bool warned;
-
-                       if (!warned) {
-                               warned = true;
-                               netdev_warn(tun->dev,
-                                           "%s: using disabled UFO feature; please fix this program\n",
-                                           current->comm);
-                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-                       if (skb->protocol == htons(ETH_P_IPV6))
-                               ipv6_proxy_select_ident(skb);
                        break;
-               }
                default:
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
@@ -1221,6 +1202,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
 
+       skb_reset_network_header(skb);
        skb_probe_transport_header(skb, 0);
 
        rxhash = skb_get_hash(skb);
@@ -1261,7 +1243,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
        int vlan_hlen = 0;
        int vnet_hdr_sz = 0;
 
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                vlan_hlen = VLAN_HLEN;
 
        if (tun->flags & IFF_VNET_HDR)
@@ -1298,6 +1280,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                        else if (sinfo->gso_type & SKB_GSO_TCPV6)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+                       else if (sinfo->gso_type & SKB_GSO_UDP)
+                               gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                        else {
                                pr_err("unexpected GSO type: "
                                       "0x%x, gso_size %d, hdr_len %d\n",
@@ -1338,7 +1322,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                } veth;
 
                veth.h_vlan_proto = skb->vlan_proto;
-               veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+               veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
 
                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
 
@@ -1566,6 +1550,17 @@ static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
 static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
 
+static struct attribute *tun_dev_attrs[] = {
+       &dev_attr_tun_flags.attr,
+       &dev_attr_owner.attr,
+       &dev_attr_group.attr,
+       NULL
+};
+
+static const struct attribute_group tun_attr_group = {
+       .attrs = tun_dev_attrs
+};
+
 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 {
        struct tun_struct *tun;
@@ -1646,6 +1641,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                dev_net_set(dev, net);
                dev->rtnl_link_ops = &tun_link_ops;
                dev->ifindex = tfile->ifindex;
+               dev->sysfs_groups[0] = &tun_attr_group;
 
                tun = netdev_priv(dev);
                tun->dev = dev;
@@ -1681,11 +1677,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                err = register_netdevice(tun->dev);
                if (err < 0)
                        goto err_detach;
-
-               if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
-                   device_create_file(&tun->dev->dev, &dev_attr_owner) ||
-                   device_create_file(&tun->dev->dev, &dev_attr_group))
-                       pr_err("Failed to create tun sysfs files\n");
        }
 
        netif_carrier_on(tun->dev);
@@ -1746,6 +1737,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
                                features |= NETIF_F_TSO6;
                        arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
                }
+
+               if (arg & TUN_F_UFO) {
+                       features |= NETIF_F_UFO;
+                       arg &= ~TUN_F_UFO;
+               }
        }
 
        /* This gives the user a way to test for new features in future by
index 9c5aa922a9f4bdb404588b74962769510be13887..6b8efcabb816459336be87dda7f9a40536067c46 100644 (file)
@@ -58,7 +58,6 @@
 #include <linux/module.h>
 #include <linux/ethtool.h>
 #include <linux/usb.h>
-#include <linux/timer.h>
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
@@ -154,6 +153,7 @@ struct hso_net {
        struct hso_device *parent;
        struct net_device *net;
        struct rfkill *rfkill;
+       char name[24];
 
        struct usb_endpoint_descriptor *in_endp;
        struct usb_endpoint_descriptor *out_endp;
@@ -274,7 +274,6 @@ struct hso_device {
        u8 usb_gone;
        struct work_struct async_get_intf;
        struct work_struct async_put_intf;
-       struct work_struct reset_device;
 
        struct usb_device *usb;
        struct usb_interface *interface;
@@ -340,7 +339,6 @@ static void async_put_intf(struct work_struct *data);
 static int hso_put_activity(struct hso_device *hso_dev);
 static int hso_get_activity(struct hso_device *hso_dev);
 static void tiocmget_intr_callback(struct urb *urb);
-static void reset_device(struct work_struct *data);
 /*****************************************************************************/
 /* Helping functions                                                         */
 /*****************************************************************************/
@@ -533,6 +531,13 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev,
 }
 static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL);
 
+static struct attribute *hso_serial_dev_attrs[] = {
+       &dev_attr_hsotype.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(hso_serial_dev);
+
 static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb)
 {
        int idx;
@@ -696,7 +701,7 @@ static void handle_usb_error(int status, const char *function,
        case -ETIMEDOUT:
                explanation = "protocol error";
                if (hso_dev)
-                       schedule_work(&hso_dev->reset_device);
+                       usb_queue_reset_device(hso_dev->interface);
                break;
        default:
                explanation = "unknown status";
@@ -1271,7 +1276,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
                goto err_out;
 
        D1("Opening %d", serial->minor);
-       kref_get(&serial->parent->ref);
 
        /* setup */
        tty->driver_data = serial;
@@ -1290,7 +1294,8 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
                if (result) {
                        hso_stop_serial_device(serial->parent);
                        serial->port.count--;
-                       kref_put(&serial->parent->ref, hso_serial_ref_free);
+               } else {
+                       kref_get(&serial->parent->ref);
                }
        } else {
                D1("Port was already open");
@@ -1340,8 +1345,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
                usb_autopm_put_interface(serial->parent->interface);
 
        mutex_unlock(&serial->parent->mutex);
-
-       kref_put(&serial->parent->ref, hso_serial_ref_free);
 }
 
 /* close the requested serial port */
@@ -1392,6 +1395,16 @@ static int hso_serial_write_room(struct tty_struct *tty)
        return room;
 }
 
+static void hso_serial_cleanup(struct tty_struct *tty)
+{
+       struct hso_serial *serial = tty->driver_data;
+
+       if (!serial)
+               return;
+
+       kref_put(&serial->parent->ref, hso_serial_ref_free);
+}
+
 /* setup the term */
 static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
 {
@@ -2198,8 +2211,8 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
 
        for (i = 0; i < serial->num_rx_urbs; i++) {
                if (serial->rx_urb[i]) {
-                               usb_kill_urb(serial->rx_urb[i]);
-                               serial->rx_urb_filled[i] = 0;
+                       usb_kill_urb(serial->rx_urb[i]);
+                       serial->rx_urb_filled[i] = 0;
                }
        }
        serial->curr_rx_urb_idx = 0;
@@ -2228,15 +2241,15 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
        return 0;
 }
 
+static void hso_serial_tty_unregister(struct hso_serial *serial)
+{
+       tty_unregister_device(tty_drv, serial->minor);
+}
+
 static void hso_serial_common_free(struct hso_serial *serial)
 {
        int i;
 
-       if (serial->parent->dev)
-               device_remove_file(serial->parent->dev, &dev_attr_hsotype);
-
-       tty_unregister_device(tty_drv, serial->minor);
-
        for (i = 0; i < serial->num_rx_urbs; i++) {
                /* unlink and free RX URB */
                usb_free_urb(serial->rx_urb[i]);
@@ -2246,6 +2259,7 @@ static void hso_serial_common_free(struct hso_serial *serial)
 
        /* unlink and free TX URB */
        usb_free_urb(serial->tx_urb);
+       kfree(serial->tx_buffer);
        kfree(serial->tx_data);
        tty_port_destroy(&serial->port);
 }
@@ -2264,11 +2278,10 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
                goto exit;
 
        /* register our minor number */
-       serial->parent->dev = tty_port_register_device(&serial->port, tty_drv,
-                       minor, &serial->parent->interface->dev);
+       serial->parent->dev = tty_port_register_device_attr(&serial->port,
+                       tty_drv, minor, &serial->parent->interface->dev,
+                       serial->parent, hso_serial_dev_groups);
        dev = serial->parent->dev;
-       dev_set_drvdata(dev, serial->parent);
-       i = device_create_file(dev, &dev_attr_hsotype);
 
        /* fill in specific data for later use */
        serial->minor = minor;
@@ -2316,6 +2329,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
 
        return 0;
 exit:
+       hso_serial_tty_unregister(serial);
        hso_serial_common_free(serial);
        return -1;
 }
@@ -2338,7 +2352,6 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
 
        INIT_WORK(&hso_dev->async_get_intf, async_get_intf);
        INIT_WORK(&hso_dev->async_put_intf, async_put_intf);
-       INIT_WORK(&hso_dev->reset_device, reset_device);
 
        return hso_dev;
 }
@@ -2459,27 +2472,21 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
 {
        struct hso_net *hso_net = dev2net(hso_dev);
        struct device *dev = &hso_net->net->dev;
-       char *rfkn;
+       static u32 rfkill_counter;
 
-       rfkn = kzalloc(20, GFP_KERNEL);
-       if (!rfkn)
-               dev_err(dev, "%s - Out of memory\n", __func__);
-
-       snprintf(rfkn, 20, "hso-%d",
-                interface->altsetting->desc.bInterfaceNumber);
+       snprintf(hso_net->name, sizeof(hso_net->name), "hso-%d",
+                rfkill_counter++);
 
-       hso_net->rfkill = rfkill_alloc(rfkn,
+       hso_net->rfkill = rfkill_alloc(hso_net->name,
                                       &interface_to_usbdev(interface)->dev,
                                       RFKILL_TYPE_WWAN,
                                       &hso_rfkill_ops, hso_dev);
        if (!hso_net->rfkill) {
                dev_err(dev, "%s - Out of memory\n", __func__);
-               kfree(rfkn);
                return;
        }
        if (rfkill_register(hso_net->rfkill) < 0) {
                rfkill_destroy(hso_net->rfkill);
-               kfree(rfkn);
                hso_net->rfkill = NULL;
                dev_err(dev, "%s - Failed to register rfkill\n", __func__);
                return;
@@ -2594,7 +2601,6 @@ static void hso_free_serial_device(struct hso_device *hso_dev)
 
        if (!serial)
                return;
-       set_serial_by_index(serial->minor, NULL);
 
        hso_serial_common_free(serial);
 
@@ -2684,6 +2690,7 @@ static struct hso_device *hso_create_bulk_serial_device(
        return hso_dev;
 
 exit2:
+       hso_serial_tty_unregister(serial);
        hso_serial_common_free(serial);
 exit:
        hso_free_tiomget(serial);
@@ -3083,26 +3090,6 @@ out:
        return result;
 }
 
-static void reset_device(struct work_struct *data)
-{
-       struct hso_device *hso_dev =
-           container_of(data, struct hso_device, reset_device);
-       struct usb_device *usb = hso_dev->usb;
-       int result;
-
-       if (hso_dev->usb_gone) {
-               D1("No reset during disconnect\n");
-       } else {
-               result = usb_lock_device_for_reset(usb, hso_dev->interface);
-               if (result < 0)
-                       D1("unable to lock device for reset: %d\n", result);
-               else {
-                       usb_reset_device(usb);
-                       usb_unlock_device(usb);
-               }
-       }
-}
-
 static void hso_serial_ref_free(struct kref *ref)
 {
        struct hso_device *hso_dev = container_of(ref, struct hso_device, ref);
@@ -3112,18 +3099,22 @@ static void hso_serial_ref_free(struct kref *ref)
 
 static void hso_free_interface(struct usb_interface *interface)
 {
-       struct hso_serial *hso_dev;
+       struct hso_serial *serial;
        int i;
 
        for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
                if (serial_table[i] &&
                    (serial_table[i]->interface == interface)) {
-                       hso_dev = dev2ser(serial_table[i]);
-                       tty_port_tty_hangup(&hso_dev->port, false);
-                       mutex_lock(&hso_dev->parent->mutex);
-                       hso_dev->parent->usb_gone = 1;
-                       mutex_unlock(&hso_dev->parent->mutex);
+                       serial = dev2ser(serial_table[i]);
+                       tty_port_tty_hangup(&serial->port, false);
+                       mutex_lock(&serial->parent->mutex);
+                       serial->parent->usb_gone = 1;
+                       mutex_unlock(&serial->parent->mutex);
+                       cancel_work_sync(&serial_table[i]->async_put_intf);
+                       cancel_work_sync(&serial_table[i]->async_get_intf);
+                       hso_serial_tty_unregister(serial);
                        kref_put(&serial_table[i]->ref, hso_serial_ref_free);
+                       set_serial_by_index(i, NULL);
                }
        }
 
@@ -3215,6 +3206,7 @@ static const struct tty_operations hso_serial_ops = {
        .close = hso_serial_close,
        .write = hso_serial_write,
        .write_room = hso_serial_write_room,
+       .cleanup = hso_serial_cleanup,
        .ioctl = hso_serial_ioctl,
        .set_termios = hso_serial_set_termios,
        .chars_in_buffer = hso_serial_chars_in_buffer,
index dcb6d33141e0640f545555848434d8efd7822878..1e9cdca370144cffb22141e4c03aa3ff800aefdd 100644 (file)
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
         awd.done = 0;
 
         urb->context = &awd;
-        status = usb_submit_urb(urb, GFP_NOIO);
+        status = usb_submit_urb(urb, GFP_ATOMIC);
         if (status) {
                 // something went wrong
                 usb_free_urb(urb);
index b8a82b86f909095632c7d5747b9bf25cb81c970e..602dc6668c3af7ce9f6cc4ddd61437ba2f6adf29 100644 (file)
@@ -56,6 +56,8 @@ struct qmi_wwan_state {
 /* default ethernet address used by the modem */
 static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
 
+static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};
+
 /* Make up an ethernet header if the packet doesn't have one.
  *
  * A firmware bug common among several devices cause them to send raw
@@ -332,10 +334,12 @@ next_desc:
                usb_driver_release_interface(driver, info->data);
        }
 
-       /* Never use the same address on both ends of the link, even
-        * if the buggy firmware told us to.
+       /* Never use the same address on both ends of the link, even if the
+        * buggy firmware told us to. Or, if device is assigned the well-known
+        * buggy firmware MAC address, replace it with a random address,
         */
-       if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+       if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) ||
+           ether_addr_equal(dev->net->dev_addr, buggy_fw_addr))
                eth_hw_addr_random(dev->net);
 
        /* make MAC addr easily distinguishable from an IP header */
index 2d1c77e81836c617364d668eecf064d0256063f5..5980ac6c48dd47bbf00e8c6367c8bf7d9f66aa8c 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/usb/cdc.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.07.0 (2014/10/09)"
+#define DRIVER_VERSION "v1.08.0 (2015/01/13)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
@@ -448,6 +448,7 @@ enum rtl_register_content {
 #define RTL8152_RMS            (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
 #define RTL8153_RMS            RTL8153_MAX_PACKET
 #define RTL8152_TX_TIMEOUT     (5 * HZ)
+#define RTL8152_NAPI_WEIGHT    64
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -457,7 +458,7 @@ enum rtl8152_flags {
        RTL8152_LINK_CHG,
        SELECTIVE_SUSPEND,
        PHY_RESET,
-       SCHEDULE_TASKLET,
+       SCHEDULE_NAPI,
 };
 
 /* Define these values to match your device */
@@ -488,16 +489,16 @@ struct rx_desc {
 #define RX_LEN_MASK                    0x7fff
 
        __le32 opts2;
-#define RD_UDP_CS                      (1 << 23)
-#define RD_TCP_CS                      (1 << 22)
-#define RD_IPV6_CS                     (1 << 20)
-#define RD_IPV4_CS                     (1 << 19)
+#define RD_UDP_CS                      BIT(23)
+#define RD_TCP_CS                      BIT(22)
+#define RD_IPV6_CS                     BIT(20)
+#define RD_IPV4_CS                     BIT(19)
 
        __le32 opts3;
-#define IPF                            (1 << 23) /* IP checksum fail */
-#define UDPF                           (1 << 22) /* UDP checksum fail */
-#define TCPF                           (1 << 21) /* TCP checksum fail */
-#define RX_VLAN_TAG                    (1 << 16)
+#define IPF                            BIT(23) /* IP checksum fail */
+#define UDPF                           BIT(22) /* UDP checksum fail */
+#define TCPF                           BIT(21) /* TCP checksum fail */
+#define RX_VLAN_TAG                    BIT(16)
 
        __le32 opts4;
        __le32 opts5;
@@ -506,24 +507,24 @@ struct rx_desc {
 
 struct tx_desc {
        __le32 opts1;
-#define TX_FS                  (1 << 31) /* First segment of a packet */
-#define TX_LS                  (1 << 30) /* Final segment of a packet */
-#define GTSENDV4               (1 << 28)
-#define GTSENDV6               (1 << 27)
+#define TX_FS                  BIT(31) /* First segment of a packet */
+#define TX_LS                  BIT(30) /* Final segment of a packet */
+#define GTSENDV4               BIT(28)
+#define GTSENDV6               BIT(27)
 #define GTTCPHO_SHIFT          18
 #define GTTCPHO_MAX            0x7fU
 #define TX_LEN_MAX             0x3ffffU
 
        __le32 opts2;
-#define UDP_CS                 (1 << 31) /* Calculate UDP/IP checksum */
-#define TCP_CS                 (1 << 30) /* Calculate TCP/IP checksum */
-#define IPV4_CS                        (1 << 29) /* Calculate IPv4 checksum */
-#define IPV6_CS                        (1 << 28) /* Calculate IPv6 checksum */
+#define UDP_CS                 BIT(31) /* Calculate UDP/IP checksum */
+#define TCP_CS                 BIT(30) /* Calculate TCP/IP checksum */
+#define IPV4_CS                        BIT(29) /* Calculate IPv4 checksum */
+#define IPV6_CS                        BIT(28) /* Calculate IPv6 checksum */
 #define MSS_SHIFT              17
 #define MSS_MAX                        0x7ffU
 #define TCPHO_SHIFT            17
 #define TCPHO_MAX              0x7ffU
-#define TX_VLAN_TAG                    (1 << 16)
+#define TX_VLAN_TAG            BIT(16)
 };
 
 struct r8152;
@@ -549,14 +550,14 @@ struct tx_agg {
 struct r8152 {
        unsigned long flags;
        struct usb_device *udev;
-       struct tasklet_struct tl;
+       struct napi_struct napi;
        struct usb_interface *intf;
        struct net_device *netdev;
        struct urb *intr_urb;
        struct tx_agg tx_info[RTL8152_MAX_TX];
        struct rx_agg rx_info[RTL8152_MAX_RX];
        struct list_head rx_done, tx_free;
-       struct sk_buff_head tx_queue;
+       struct sk_buff_head tx_queue, rx_queue;
        spinlock_t rx_lock, tx_lock;
        struct delayed_work schedule;
        struct mii_if_info mii;
@@ -580,7 +581,6 @@ struct r8152 {
        u16 ocp_base;
        u8 *intr_buff;
        u8 version;
-       u8 speed;
 };
 
 enum rtl_version {
@@ -833,9 +833,6 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
                index &= ~3;
        }
 
-       generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
-       data |= __le32_to_cpu(tmp) & ~mask;
        tmp = __cpu_to_le32(data);
 
        generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -874,9 +871,6 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
                index &= ~3;
        }
 
-       generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
-       data |= __le32_to_cpu(tmp) & ~mask;
        tmp = __cpu_to_le32(data);
 
        generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -926,12 +920,6 @@ static void sram_write(struct r8152 *tp, u16 addr, u16 data)
        ocp_reg_write(tp, OCP_SRAM_DATA, data);
 }
 
-static u16 sram_read(struct r8152 *tp, u16 addr)
-{
-       ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
-       return ocp_reg_read(tp, OCP_SRAM_DATA);
-}
-
 static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
 {
        struct r8152 *tp = netdev_priv(netdev);
@@ -1062,7 +1050,7 @@ static void read_bulk_callback(struct urb *urb)
                spin_lock(&tp->rx_lock);
                list_add_tail(&agg->list, &tp->rx_done);
                spin_unlock(&tp->rx_lock);
-               tasklet_schedule(&tp->tl);
+               napi_schedule(&tp->napi);
                return;
        case -ESHUTDOWN:
                set_bit(RTL8152_UNPLUG, &tp->flags);
@@ -1126,7 +1114,7 @@ static void write_bulk_callback(struct urb *urb)
                return;
 
        if (!skb_queue_empty(&tp->tx_queue))
-               tasklet_schedule(&tp->tl);
+               napi_schedule(&tp->napi);
 }
 
 static void intr_callback(struct urb *urb)
@@ -1168,12 +1156,12 @@ static void intr_callback(struct urb *urb)
 
        d = urb->transfer_buffer;
        if (INTR_LINK & __le16_to_cpu(d[0])) {
-               if (!(tp->speed & LINK_STATUS)) {
+               if (!netif_carrier_ok(tp->netdev)) {
                        set_bit(RTL8152_LINK_CHG, &tp->flags);
                        schedule_delayed_work(&tp->schedule, 0);
                }
        } else {
-               if (tp->speed & LINK_STATUS) {
+               if (netif_carrier_ok(tp->netdev)) {
                        set_bit(RTL8152_LINK_CHG, &tp->flags);
                        schedule_delayed_work(&tp->schedule, 0);
                }
@@ -1245,6 +1233,7 @@ static int alloc_all_mem(struct r8152 *tp)
        spin_lock_init(&tp->tx_lock);
        INIT_LIST_HEAD(&tp->tx_free);
        skb_queue_head_init(&tp->tx_queue);
+       skb_queue_head_init(&tp->rx_queue);
 
        for (i = 0; i < RTL8152_MAX_RX; i++) {
                buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
@@ -1341,18 +1330,6 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
        return agg;
 }
 
-static inline __be16 get_protocol(struct sk_buff *skb)
-{
-       __be16 protocol;
-
-       if (skb->protocol == htons(ETH_P_8021Q))
-               protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
-       else
-               protocol = skb->protocol;
-
-       return protocol;
-}
-
 /* r8152_csum_workaround()
  * The hw limites the value the transport offset. When the offset is out of the
  * range, calculate the checksum by sw.
@@ -1421,10 +1398,10 @@ static int msdn_giant_send_check(struct sk_buff *skb)
 
 static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
 {
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                u32 opts2;
 
-               opts2 = TX_VLAN_TAG | swab16(vlan_tx_tag_get(skb));
+               opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb));
                desc->opts2 |= cpu_to_le32(opts2);
        }
 }
@@ -1458,7 +1435,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
                        goto unavailable;
                }
 
-               switch (get_protocol(skb)) {
+               switch (vlan_get_protocol(skb)) {
                case htons(ETH_P_IP):
                        opts1 |= GTSENDV4;
                        break;
@@ -1489,7 +1466,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
                        goto unavailable;
                }
 
-               switch (get_protocol(skb)) {
+               switch (vlan_get_protocol(skb)) {
                case htons(ETH_P_IP):
                        opts2 |= IPV4_CS;
                        ip_protocol = ip_hdr(skb)->protocol;
@@ -1649,13 +1626,32 @@ return_result:
        return checksum;
 }
 
-static void rx_bottom(struct r8152 *tp)
+static int rx_bottom(struct r8152 *tp, int budget)
 {
        unsigned long flags;
        struct list_head *cursor, *next, rx_queue;
+       int ret = 0, work_done = 0;
+
+       if (!skb_queue_empty(&tp->rx_queue)) {
+               while (work_done < budget) {
+                       struct sk_buff *skb = __skb_dequeue(&tp->rx_queue);
+                       struct net_device *netdev = tp->netdev;
+                       struct net_device_stats *stats = &netdev->stats;
+                       unsigned int pkt_len;
+
+                       if (!skb)
+                               break;
+
+                       pkt_len = skb->len;
+                       napi_gro_receive(&tp->napi, skb);
+                       work_done++;
+                       stats->rx_packets++;
+                       stats->rx_bytes += pkt_len;
+               }
+       }
 
        if (list_empty(&tp->rx_done))
-               return;
+               goto out1;
 
        INIT_LIST_HEAD(&rx_queue);
        spin_lock_irqsave(&tp->rx_lock, flags);
@@ -1708,9 +1704,14 @@ static void rx_bottom(struct r8152 *tp)
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, netdev);
                        rtl_rx_vlan_tag(rx_desc, skb);
-                       netif_receive_skb(skb);
-                       stats->rx_packets++;
-                       stats->rx_bytes += pkt_len;
+                       if (work_done < budget) {
+                               napi_gro_receive(&tp->napi, skb);
+                               work_done++;
+                               stats->rx_packets++;
+                               stats->rx_bytes += pkt_len;
+                       } else {
+                               __skb_queue_tail(&tp->rx_queue, skb);
+                       }
 
 find_next_rx:
                        rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
@@ -1720,8 +1721,22 @@ find_next_rx:
                }
 
 submit:
-               r8152_submit_rx(tp, agg, GFP_ATOMIC);
+               if (!ret) {
+                       ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
+               } else {
+                       urb->actual_length = 0;
+                       list_add_tail(&agg->list, next);
+               }
        }
+
+       if (!list_empty(&rx_queue)) {
+               spin_lock_irqsave(&tp->rx_lock, flags);
+               list_splice_tail(&rx_queue, &tp->rx_done);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
+       }
+
+out1:
+       return work_done;
 }
 
 static void tx_bottom(struct r8152 *tp)
@@ -1761,12 +1776,8 @@ static void tx_bottom(struct r8152 *tp)
        } while (res == 0);
 }
 
-static void bottom_half(unsigned long data)
+static void bottom_half(struct r8152 *tp)
 {
-       struct r8152 *tp;
-
-       tp = (struct r8152 *)data;
-
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
@@ -1778,17 +1789,38 @@ static void bottom_half(unsigned long data)
        if (!netif_carrier_ok(tp->netdev))
                return;
 
-       clear_bit(SCHEDULE_TASKLET, &tp->flags);
+       clear_bit(SCHEDULE_NAPI, &tp->flags);
 
-       rx_bottom(tp);
        tx_bottom(tp);
 }
 
+static int r8152_poll(struct napi_struct *napi, int budget)
+{
+       struct r8152 *tp = container_of(napi, struct r8152, napi);
+       int work_done;
+
+       work_done = rx_bottom(tp, budget);
+       bottom_half(tp);
+
+       if (work_done < budget) {
+               napi_complete(napi);
+               if (!list_empty(&tp->rx_done))
+                       napi_schedule(napi);
+       }
+
+       return work_done;
+}
+
 static
 int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
 {
        int ret;
 
+       /* The rx would be stopped, so skip submitting */
+       if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
+           !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
+               return 0;
+
        usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
                          agg->head, agg_buf_sz,
                          (usb_complete_t)read_bulk_callback, agg);
@@ -1805,7 +1837,11 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
                spin_lock_irqsave(&tp->rx_lock, flags);
                list_add_tail(&agg->list, &tp->rx_done);
                spin_unlock_irqrestore(&tp->rx_lock, flags);
-               tasklet_schedule(&tp->tl);
+
+               netif_err(tp, rx_err, tp->netdev,
+                         "Couldn't submit rx[%p], ret = %d\n", agg, ret);
+
+               napi_schedule(&tp->napi);
        }
 
        return ret;
@@ -1845,7 +1881,7 @@ static void rtl8152_set_rx_mode(struct net_device *netdev)
 {
        struct r8152 *tp = netdev_priv(netdev);
 
-       if (tp->speed & LINK_STATUS) {
+       if (netif_carrier_ok(netdev)) {
                set_bit(RTL8152_SET_RX_MODE, &tp->flags);
                schedule_delayed_work(&tp->schedule, 0);
        }
@@ -1897,6 +1933,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
        netif_wake_queue(netdev);
 }
 
+static netdev_features_t
+rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
+                      netdev_features_t features)
+{
+       u32 mss = skb_shinfo(skb)->gso_size;
+       int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
+       int offset = skb_transport_offset(skb);
+
+       if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+               features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+       else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
+               features &= ~NETIF_F_GSO_MASK;
+
+       return features;
+}
+
 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
                                      struct net_device *netdev)
 {
@@ -1908,11 +1960,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 
        if (!list_empty(&tp->tx_free)) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-                       set_bit(SCHEDULE_TASKLET, &tp->flags);
+                       set_bit(SCHEDULE_NAPI, &tp->flags);
                        schedule_delayed_work(&tp->schedule, 0);
                } else {
                        usb_mark_last_busy(tp->udev);
-                       tasklet_schedule(&tp->tl);
+                       napi_schedule(&tp->napi);
                }
        } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) {
                netif_stop_queue(netdev);
@@ -1991,6 +2043,7 @@ static int rtl_start_rx(struct r8152 *tp)
 {
        int i, ret = 0;
 
+       napi_disable(&tp->napi);
        INIT_LIST_HEAD(&tp->rx_done);
        for (i = 0; i < RTL8152_MAX_RX; i++) {
                INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -1998,6 +2051,7 @@ static int rtl_start_rx(struct r8152 *tp)
                if (ret)
                        break;
        }
+       napi_enable(&tp->napi);
 
        if (ret && ++i < RTL8152_MAX_RX) {
                struct list_head rx_queue;
@@ -2028,6 +2082,9 @@ static int rtl_stop_rx(struct r8152 *tp)
        for (i = 0; i < RTL8152_MAX_RX; i++)
                usb_kill_urb(tp->rx_info[i].urb);
 
+       while (!skb_queue_empty(&tp->rx_queue))
+               dev_kfree_skb(__skb_dequeue(&tp->rx_queue));
+
        return 0;
 }
 
@@ -2043,7 +2100,7 @@ static int rtl_enable(struct r8152 *tp)
 
        rxdy_gated_en(tp, false);
 
-       return rtl_start_rx(tp);
+       return 0;
 }
 
 static int rtl8152_enable(struct r8152 *tp)
@@ -2502,24 +2559,18 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
        data = ocp_reg_read(tp, OCP_POWER_CFG);
        data |= EN_10M_PLLOFF;
        ocp_reg_write(tp, OCP_POWER_CFG, data);
-       data = sram_read(tp, SRAM_IMPEDANCE);
-       data &= ~RX_DRIVING_MASK;
-       sram_write(tp, SRAM_IMPEDANCE, data);
+       sram_write(tp, SRAM_IMPEDANCE, 0x0b13);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
        ocp_data |= PFM_PWM_SWITCH;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
 
-       data = sram_read(tp, SRAM_LPF_CFG);
-       data |= LPF_AUTO_TUNE;
-       sram_write(tp, SRAM_LPF_CFG, data);
+       /* Enable LPF corner auto tune */
+       sram_write(tp, SRAM_LPF_CFG, 0xf70f);
 
-       data = sram_read(tp, SRAM_10M_AMP1);
-       data |= GDAC_IB_UPALL;
-       sram_write(tp, SRAM_10M_AMP1, data);
-       data = sram_read(tp, SRAM_10M_AMP2);
-       data |= AMP_DN;
-       sram_write(tp, SRAM_10M_AMP2, data);
+       /* Adjust 10M Amplitude */
+       sram_write(tp, SRAM_10M_AMP1, 0x00af);
+       sram_write(tp, SRAM_10M_AMP2, 0x0208);
 
        set_bit(PHY_RESET, &tp->flags);
 }
@@ -2854,20 +2905,20 @@ static void set_carrier(struct r8152 *tp)
        speed = rtl8152_get_speed(tp);
 
        if (speed & LINK_STATUS) {
-               if (!(tp->speed & LINK_STATUS)) {
+               if (!netif_carrier_ok(netdev)) {
                        tp->rtl_ops.enable(tp);
                        set_bit(RTL8152_SET_RX_MODE, &tp->flags);
                        netif_carrier_on(netdev);
+                       rtl_start_rx(tp);
                }
        } else {
-               if (tp->speed & LINK_STATUS) {
+               if (netif_carrier_ok(netdev)) {
                        netif_carrier_off(netdev);
-                       tasklet_disable(&tp->tl);
+                       napi_disable(&tp->napi);
                        tp->rtl_ops.disable(tp);
-                       tasklet_enable(&tp->tl);
+                       napi_enable(&tp->napi);
                }
        }
-       tp->speed = speed;
 }
 
 static void rtl_work_func_t(struct work_struct *work)
@@ -2897,10 +2948,11 @@ static void rtl_work_func_t(struct work_struct *work)
        if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
                _rtl8152_set_rx_mode(tp->netdev);
 
-       if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
-           (tp->speed & LINK_STATUS)) {
-               clear_bit(SCHEDULE_TASKLET, &tp->flags);
-               tasklet_schedule(&tp->tl);
+       /* don't schedule napi before linking */
+       if (test_bit(SCHEDULE_NAPI, &tp->flags) &&
+           netif_carrier_ok(tp->netdev)) {
+               clear_bit(SCHEDULE_NAPI, &tp->flags);
+               napi_schedule(&tp->napi);
        }
 
        if (test_bit(PHY_RESET, &tp->flags))
@@ -2921,8 +2973,7 @@ static int rtl8152_open(struct net_device *netdev)
        if (res)
                goto out;
 
-       /* set speed to 0 to avoid autoresume try to submit rx */
-       tp->speed = 0;
+       netif_carrier_off(netdev);
 
        res = usb_autopm_get_interface(tp->intf);
        if (res < 0) {
@@ -2939,7 +2990,7 @@ static int rtl8152_open(struct net_device *netdev)
                cancel_delayed_work_sync(&tp->schedule);
 
                /* disable the tx/rx, if the workqueue has enabled them. */
-               if (tp->speed & LINK_STATUS)
+               if (netif_carrier_ok(netdev))
                        tp->rtl_ops.disable(tp);
        }
 
@@ -2948,7 +2999,6 @@ static int rtl8152_open(struct net_device *netdev)
        rtl8152_set_speed(tp, AUTONEG_ENABLE,
                          tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
                          DUPLEX_FULL);
-       tp->speed = 0;
        netif_carrier_off(netdev);
        netif_start_queue(netdev);
        set_bit(WORK_ENABLE, &tp->flags);
@@ -2961,7 +3011,7 @@ static int rtl8152_open(struct net_device *netdev)
                           res);
                free_all_mem(tp);
        } else {
-               tasklet_enable(&tp->tl);
+               napi_enable(&tp->napi);
        }
 
        mutex_unlock(&tp->control);
@@ -2977,15 +3027,16 @@ static int rtl8152_close(struct net_device *netdev)
        struct r8152 *tp = netdev_priv(netdev);
        int res = 0;
 
-       tasklet_disable(&tp->tl);
+       napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
        netif_stop_queue(netdev);
 
        res = usb_autopm_get_interface(tp->intf);
-       if (res < 0) {
+       if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
                rtl_drop_queued_tx(tp);
+               rtl_stop_rx(tp);
        } else {
                mutex_lock(&tp->control);
 
@@ -3189,10 +3240,10 @@ static void r8153_init(struct r8152 *tp)
 
        ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL);
        ocp_data &= ~LPM_TIMER_MASK;
-       if (tp->udev->speed == USB_SPEED_SUPER)
-               ocp_data |= LPM_TIMER_500US;
-       else
+       if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER)
                ocp_data |= LPM_TIMER_500MS;
+       else
+               ocp_data |= LPM_TIMER_500US;
        ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2);
@@ -3241,7 +3292,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
        if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
-               tasklet_disable(&tp->tl);
+               napi_disable(&tp->napi);
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                        rtl_stop_rx(tp);
                        rtl_runtime_suspend_enable(tp, true);
@@ -3249,7 +3300,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
                        cancel_delayed_work_sync(&tp->schedule);
                        tp->rtl_ops.down(tp);
                }
-               tasklet_enable(&tp->tl);
+               napi_enable(&tp->napi);
        }
 out1:
        mutex_unlock(&tp->control);
@@ -3273,7 +3324,7 @@ static int rtl8152_resume(struct usb_interface *intf)
                        rtl_runtime_suspend_enable(tp, false);
                        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
                        set_bit(WORK_ENABLE, &tp->flags);
-                       if (tp->speed & LINK_STATUS)
+                       if (netif_carrier_ok(tp->netdev))
                                rtl_start_rx(tp);
                } else {
                        tp->rtl_ops.up(tp);
@@ -3281,7 +3332,6 @@ static int rtl8152_resume(struct usb_interface *intf)
                                          tp->mii.supports_gmii ?
                                          SPEED_1000 : SPEED_100,
                                          DUPLEX_FULL);
-                       tp->speed = 0;
                        netif_carrier_off(tp->netdev);
                        set_bit(WORK_ENABLE, &tp->flags);
                }
@@ -3706,6 +3756,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
        .ndo_set_mac_address    = rtl8152_set_mac_address,
        .ndo_change_mtu         = rtl8152_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
+       .ndo_features_check     = rtl8152_features_check,
 };
 
 static void r8152b_get_version(struct r8152 *tp)
@@ -3832,7 +3883,6 @@ static int rtl8152_probe(struct usb_interface *intf,
        if (ret)
                goto out;
 
-       tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
        mutex_init(&tp->control);
        INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
 
@@ -3846,8 +3896,7 @@ static int rtl8152_probe(struct usb_interface *intf,
        netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
                              NETIF_F_TSO | NETIF_F_FRAGLIST |
                              NETIF_F_IPV6_CSUM | NETIF_F_TSO6 |
-                             NETIF_F_HW_VLAN_CTAG_RX |
-                             NETIF_F_HW_VLAN_CTAG_TX;
+                             NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
        netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                                NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
                                NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
@@ -3868,6 +3917,7 @@ static int rtl8152_probe(struct usb_interface *intf,
        set_ethernet_addr(tp);
 
        usb_set_intfdata(intf, tp);
+       netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT);
 
        ret = register_netdev(netdev);
        if (ret != 0) {
@@ -3881,15 +3931,13 @@ static int rtl8152_probe(struct usb_interface *intf,
        else
                device_set_wakeup_enable(&udev->dev, false);
 
-       tasklet_disable(&tp->tl);
-
        netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
 
        return 0;
 
 out1:
+       netif_napi_del(&tp->napi);
        usb_set_intfdata(intf, NULL);
-       tasklet_kill(&tp->tl);
 out:
        free_netdev(netdev);
        return ret;
@@ -3906,7 +3954,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
                if (udev->state == USB_STATE_NOTATTACHED)
                        set_bit(RTL8152_UNPLUG, &tp->flags);
 
-               tasklet_kill(&tp->tl);
+               netif_napi_del(&tp->napi);
                unregister_netdev(tp->netdev);
                tp->rtl_ops.unload(tp);
                free_netdev(tp->netdev);
index 99b69af142742523d873762bcdd9c58c0b2b8b0e..4a1e9c489f1f455388ffee289d65e1d6b36cba42 100644 (file)
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
                int ret;
 
                udelay(1);
-               ret = sr_read_reg(dev, EPCR, &tmp);
+               ret = sr_read_reg(dev, SR_EPCR, &tmp);
                if (ret < 0)
                        return ret;
 
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
 
        mutex_lock(&dev->phy_mutex);
 
-       sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
-       sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
+       sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+       sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
 
        ret = wait_phy_eeprom_ready(dev, phy);
        if (ret < 0)
                goto out_unlock;
 
-       sr_write_reg(dev, EPCR, 0x0);
-       ret = sr_read(dev, EPDR, 2, value);
+       sr_write_reg(dev, SR_EPCR, 0x0);
+       ret = sr_read(dev, SR_EPDR, 2, value);
 
        netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
                   phy, reg, *value, ret);
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
 
        mutex_lock(&dev->phy_mutex);
 
-       ret = sr_write(dev, EPDR, 2, &value);
+       ret = sr_write(dev, SR_EPDR, 2, &value);
        if (ret < 0)
                goto out_unlock;
 
-       sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
-       sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
+       sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+       sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
                    (EPCR_WEP | EPCR_ERPRW));
 
        ret = wait_phy_eeprom_ready(dev, phy);
        if (ret < 0)
                goto out_unlock;
 
-       sr_write_reg(dev, EPCR, 0x0);
+       sr_write_reg(dev, SR_EPCR, 0x0);
 
 out_unlock:
        mutex_unlock(&dev->phy_mutex);
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
        if (loc == MII_BMSR) {
                u8 value;
 
-               sr_read_reg(dev, NSR, &value);
+               sr_read_reg(dev, SR_NSR, &value);
                if (value & NSR_LINKST)
                        rc = 1;
        }
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev)
        int rc = 0;
 
        /* Get the Link Status directly */
-       sr_read_reg(dev, NSR, &value);
+       sr_read_reg(dev, SR_NSR, &value);
        if (value & NSR_LINKST)
                rc = 1;
 
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev)
                }
        }
 
-       sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes);
-       sr_write_reg_async(dev, RCR, rx_ctl);
+       sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
+       sr_write_reg_async(dev, SR_RCR, rx_ctl);
 }
 
 static int sr9700_set_mac_address(struct net_device *netdev, void *p)
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
        }
 
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       sr_write_async(dev, PAR, 6, netdev->dev_addr);
+       sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
 
        return 0;
 }
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
        mii->phy_id_mask = 0x1f;
        mii->reg_num_mask = 0x1f;
 
-       sr_write_reg(dev, NCR, NCR_RST);
+       sr_write_reg(dev, SR_NCR, NCR_RST);
        udelay(20);
 
        /* read MAC
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
         * EEPROM automatically to PAR. In case there is no EEPROM externally,
         * a default MAC address is stored in PAR for making chip work properly.
         */
-       if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+       if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
                netdev_err(netdev, "Error reading MAC address\n");
                ret = -ENODEV;
                goto out;
        }
 
        /* power up and reset phy */
-       sr_write_reg(dev, PRR, PRR_PHY_RST);
+       sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
        /* at least 10ms, here 20ms for safe */
        mdelay(20);
-       sr_write_reg(dev, PRR, 0);
+       sr_write_reg(dev, SR_PRR, 0);
        /* at least 1ms, here 2ms for reading right register */
        udelay(2 * 1000);
 
index fd687c575e742efce8d99e2fa28bb74dbba0e7dd..258b030277e753d56b7961a04128dce2d97da70a 100644 (file)
 /* sr9700 spec. register table on Linux platform */
 
 /* Network Control Reg */
-#define        NCR                     0x00
+#define        SR_NCR                  0x00
 #define                NCR_RST                 (1 << 0)
 #define                NCR_LBK                 (3 << 1)
 #define                NCR_FDX                 (1 << 3)
 #define                NCR_WAKEEN              (1 << 6)
 /* Network Status Reg */
-#define        NSR                     0x01
+#define        SR_NSR                  0x01
 #define                NSR_RXRDY               (1 << 0)
 #define                NSR_RXOV                (1 << 1)
 #define                NSR_TX1END              (1 << 2)
@@ -30,7 +30,7 @@
 #define                NSR_LINKST              (1 << 6)
 #define                NSR_SPEED               (1 << 7)
 /* Tx Control Reg */
-#define        TCR                     0x02
+#define        SR_TCR                  0x02
 #define                TCR_CRC_DIS             (1 << 1)
 #define                TCR_PAD_DIS             (1 << 2)
 #define                TCR_LC_CARE             (1 << 3)
@@ -38,7 +38,7 @@
 #define                TCR_EXCECM              (1 << 5)
 #define                TCR_LF_EN               (1 << 6)
 /* Tx Status Reg for Packet Index 1 */
-#define        TSR1            0x03
+#define        SR_TSR1         0x03
 #define                TSR1_EC                 (1 << 2)
 #define                TSR1_COL                (1 << 3)
 #define                TSR1_LC                 (1 << 4)
@@ -46,7 +46,7 @@
 #define                TSR1_LOC                (1 << 6)
 #define                TSR1_TLF                (1 << 7)
 /* Tx Status Reg for Packet Index 2 */
-#define        TSR2            0x04
+#define        SR_TSR2         0x04
 #define                TSR2_EC                 (1 << 2)
 #define                TSR2_COL                (1 << 3)
 #define                TSR2_LC                 (1 << 4)
@@ -54,7 +54,7 @@
 #define                TSR2_LOC                (1 << 6)
 #define                TSR2_TLF                (1 << 7)
 /* Rx Control Reg*/
-#define        RCR                     0x05
+#define        SR_RCR                  0x05
 #define                RCR_RXEN                (1 << 0)
 #define                RCR_PRMSC               (1 << 1)
 #define                RCR_RUNT                (1 << 2)
 #define                RCR_DIS_CRC             (1 << 4)
 #define                RCR_DIS_LONG    (1 << 5)
 /* Rx Status Reg */
-#define        RSR                     0x06
+#define        SR_RSR                  0x06
 #define                RSR_AE                  (1 << 2)
 #define                RSR_MF                  (1 << 6)
 #define                RSR_RF                  (1 << 7)
 /* Rx Overflow Counter Reg */
-#define        ROCR            0x07
+#define        SR_ROCR         0x07
 #define                ROCR_ROC                (0x7F << 0)
 #define                ROCR_RXFU               (1 << 7)
 /* Back Pressure Threshold Reg */
-#define        BPTR            0x08
+#define        SR_BPTR         0x08
 #define                BPTR_JPT                (0x0F << 0)
 #define                BPTR_BPHW               (0x0F << 4)
 /* Flow Control Threshold Reg */
-#define        FCTR            0x09
+#define        SR_FCTR         0x09
 #define                FCTR_LWOT               (0x0F << 0)
 #define                FCTR_HWOT               (0x0F << 4)
 /* rx/tx Flow Control Reg */
-#define        FCR                     0x0A
+#define        SR_FCR                  0x0A
 #define                FCR_FLCE                (1 << 0)
 #define                FCR_BKPA                (1 << 4)
 #define                FCR_TXPEN               (1 << 5)
 #define                FCR_TXPF                (1 << 6)
 #define                FCR_TXP0                (1 << 7)
 /* Eeprom & Phy Control Reg */
-#define        EPCR            0x0B
+#define        SR_EPCR         0x0B
 #define                EPCR_ERRE               (1 << 0)
 #define                EPCR_ERPRW              (1 << 1)
 #define                EPCR_ERPRR              (1 << 2)
 #define                EPCR_EPOS               (1 << 3)
 #define                EPCR_WEP                (1 << 4)
 /* Eeprom & Phy Address Reg */
-#define        EPAR            0x0C
+#define        SR_EPAR         0x0C
 #define                EPAR_EROA               (0x3F << 0)
 #define                EPAR_PHY_ADR_MASK       (0x03 << 6)
 #define                EPAR_PHY_ADR            (0x01 << 6)
 /* Eeprom &    Phy Data Reg */
-#define        EPDR            0x0D    /* 0x0D ~ 0x0E for Data Reg Low & High */
+#define        SR_EPDR         0x0D    /* 0x0D ~ 0x0E for Data Reg Low & High */
 /* Wakeup Control Reg */
-#define        WCR                     0x0F
+#define        SR_WCR                  0x0F
 #define                WCR_MAGICST             (1 << 0)
 #define                WCR_LINKST              (1 << 2)
 #define                WCR_MAGICEN             (1 << 3)
 #define                WCR_LINKEN              (1 << 5)
 /* Physical Address Reg */
-#define        PAR                     0x10    /* 0x10 ~ 0x15 6 bytes for PAR */
+#define        SR_PAR                  0x10    /* 0x10 ~ 0x15 6 bytes for PAR */
 /* Multicast Address Reg */
-#define        MAR                     0x16    /* 0x16 ~ 0x1D 8 bytes for MAR */
+#define        SR_MAR                  0x16    /* 0x16 ~ 0x1D 8 bytes for MAR */
 /* 0x1e unused */
 /* Phy Reset Reg */
-#define        PRR                     0x1F
+#define        SR_PRR                  0x1F
 #define                PRR_PHY_RST             (1 << 0)
 /* Tx sdram Write Pointer Address Low */
-#define        TWPAL           0x20
+#define        SR_TWPAL                0x20
 /* Tx sdram Write Pointer Address High */
-#define        TWPAH           0x21
+#define        SR_TWPAH                0x21
 /* Tx sdram Read Pointer Address Low */
-#define        TRPAL           0x22
+#define        SR_TRPAL                0x22
 /* Tx sdram Read Pointer Address High */
-#define        TRPAH           0x23
+#define        SR_TRPAH                0x23
 /* Rx sdram Write Pointer Address Low */
-#define        RWPAL           0x24
+#define        SR_RWPAL                0x24
 /* Rx sdram Write Pointer Address High */
-#define        RWPAH           0x25
+#define        SR_RWPAH                0x25
 /* Rx sdram Read Pointer Address Low */
-#define        RRPAL           0x26
+#define        SR_RRPAL                0x26
 /* Rx sdram Read Pointer Address High */
-#define        RRPAH           0x27
+#define        SR_RRPAH                0x27
 /* Vendor ID register */
-#define        VID                     0x28    /* 0x28 ~ 0x29 2 bytes for VID */
+#define        SR_VID                  0x28    /* 0x28 ~ 0x29 2 bytes for VID */
 /* Product ID register */
-#define        PID                     0x2A    /* 0x2A ~ 0x2B 2 bytes for PID */
+#define        SR_PID                  0x2A    /* 0x2A ~ 0x2B 2 bytes for PID */
 /* CHIP Revision register */
-#define        CHIPR           0x2C
+#define        SR_CHIPR                0x2C
 /* 0x2D --> 0xEF unused */
 /* USB Device Address */
-#define        USBDA           0xF0
+#define        SR_USBDA                0xF0
 #define                USBDA_USBFA             (0x7F << 0)
 /* RX packet Counter Reg */
-#define        RXC                     0xF1
+#define        SR_RXC                  0xF1
 /* Tx packet Counter & USB Status Reg */
-#define        TXC_USBS        0xF2
+#define        SR_TXC_USBS             0xF2
 #define                TXC_USBS_TXC0           (1 << 0)
 #define                TXC_USBS_TXC1           (1 << 1)
 #define                TXC_USBS_TXC2           (1 << 2)
 #define                TXC_USBS_SUSFLAG        (1 << 6)
 #define                TXC_USBS_RXFAULT        (1 << 7)
 /* USB Control register */
-#define        USBC            0xF4
+#define        SR_USBC                 0xF4
 #define                USBC_EP3NAK             (1 << 4)
 #define                USBC_EP3ACK             (1 << 5)
 
index 3a6770a65d7836ace177cff2bb45c925b474a84a..449835f4331e210daad6c8717430341c1c6996ef 100644 (file)
@@ -160,20 +160,19 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
 
 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
 {
-       int             tmp, i;
+       int             tmp = -1, ret;
        unsigned char   buf [13];
 
-       tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
-       if (tmp != 12) {
+       ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
+       if (ret == 12)
+               tmp = hex2bin(dev->net->dev_addr, buf, 6);
+       if (tmp < 0) {
                dev_dbg(&dev->udev->dev,
                        "bad MAC string %d fetch, %d\n", iMACAddress, tmp);
-               if (tmp >= 0)
-                       tmp = -EINVAL;
-               return tmp;
+               if (ret >= 0)
+                       ret = -EINVAL;
+               return ret;
        }
-       for (i = tmp = 0; i < 6; i++, tmp += 2)
-               dev->net->dev_addr [i] =
-                       (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
        return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
index 8ad596573d1783d512ba4b40e2044909850e7f52..4cca36ebc4fb194a22af440f11047ecc0ce7fd1e 100644 (file)
@@ -469,6 +469,14 @@ static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
        [VETH_INFO_PEER]        = { .len = sizeof(struct ifinfomsg) },
 };
 
+static struct net *veth_get_link_net(const struct net_device *dev)
+{
+       struct veth_priv *priv = netdev_priv(dev);
+       struct net_device *peer = rtnl_dereference(priv->peer);
+
+       return peer ? dev_net(peer) : dev_net(dev);
+}
+
 static struct rtnl_link_ops veth_link_ops = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct veth_priv),
@@ -478,6 +486,7 @@ static struct rtnl_link_ops veth_link_ops = {
        .dellink        = veth_dellink,
        .policy         = veth_policy,
        .maxtype        = VETH_INFO_MAX,
+       .get_link_net   = veth_get_link_net,
 };
 
 /*
index 11e2e8131359cf609e75d65c1fd2e51cb03c64af..110a2cf67244c8946a295c09d7c63b3297f0f0d3 100644 (file)
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-               {
-                       static bool warned;
-
-                       if (!warned) {
-                               warned = true;
-                               netdev_warn(dev,
-                                           "host using disabled UFO feature; please fix it\n");
-                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
-               }
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+               else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+                       hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -925,6 +918,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(sq);
 
+       /* timestamp packet in software */
+       skb_tx_timestamp(skb);
+
        /* Try to transmit */
        err = xmit_skb(sq, skb);
 
@@ -1376,6 +1372,7 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
        .get_ringparam = virtnet_get_ringparam,
        .set_channels = virtnet_set_channels,
        .get_channels = virtnet_get_channels,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 #define MIN_MTU 68
@@ -1748,7 +1745,7 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
 
                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
-                       dev->hw_features |= NETIF_F_TSO
+                       dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
@@ -1758,11 +1755,13 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->hw_features |= NETIF_F_TSO6;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->hw_features |= NETIF_F_TSO_ECN;
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+                       dev->hw_features |= NETIF_F_UFO;
 
                dev->features |= NETIF_F_GSO_ROBUST;
 
                if (gso)
-                       dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+                       dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
                /* (!csum && gso) case will be fixed by register_netdev() */
        }
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1800,7 +1799,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
-           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
                vi->big_packets = true;
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1996,9 +1996,9 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
-       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
+       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
-       VIRTIO_NET_F_GUEST_ECN,
+       VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
index 4d84912c99bae05f19eec5abf04632e077c1cc4f..3718d024f63858b27cceb1fdd319d4c5915e9b4b 100644 (file)
@@ -342,6 +342,7 @@ union Vmxnet3_GenericDesc {
 #define VMXNET3_TX_RING_MAX_SIZE   4096
 #define VMXNET3_TC_RING_MAX_SIZE   4096
 #define VMXNET3_RX_RING_MAX_SIZE   4096
+#define VMXNET3_RX_RING2_MAX_SIZE  2048
 #define VMXNET3_RC_RING_MAX_SIZE   8192
 
 /* a list of reasons for queue stop */
@@ -392,7 +393,7 @@ struct Vmxnet3_DriverInfo {
 };
 
 
-#define VMXNET3_REV1_MAGIC  0xbabefee1
+#define VMXNET3_REV1_MAGIC  3133079265u
 
 /*
  * QueueDescPA must be 128 bytes aligned. It points to an array of
index afd295348ddbe0eb968cd884f657b2b7c2857ebe..294214c152927a30564bf83d9204f3351b64f237 100644 (file)
@@ -1038,9 +1038,9 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                le32_add_cpu(&tq->shared->txNumDeferred, 1);
        }
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                gdesc->txd.ti = 1;
-               gdesc->txd.tci = vlan_tx_tag_get(skb);
+               gdesc->txd.tci = skb_vlan_tag_get(skb);
        }
 
        /* finally flips the GEN bit of the SOP desc. */
@@ -2505,6 +2505,9 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
        ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
                           sz * sz);
        ring1_size = adapter->rx_queue[0].rx_ring[1].size;
+       ring1_size = (ring1_size + sz - 1) / sz * sz;
+       ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
+                          sz * sz);
        comp_size = ring0_size + ring1_size;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -2585,7 +2588,7 @@ vmxnet3_open(struct net_device *netdev)
 
        err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
                                    adapter->rx_ring_size,
-                                   VMXNET3_DEF_RX_RING_SIZE);
+                                   adapter->rx_ring2_size);
        if (err)
                goto queue_err;
 
@@ -2964,6 +2967,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 
        adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
        adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+       adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
 
        spin_lock_init(&adapter->cmd_lock);
        adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
@@ -3286,27 +3290,15 @@ skip_arp:
 static int
 vmxnet3_resume(struct device *device)
 {
-       int err, i = 0;
+       int err;
        unsigned long flags;
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-       struct Vmxnet3_PMConf *pmConf;
 
        if (!netif_running(netdev))
                return 0;
 
-       /* Destroy wake-up filters. */
-       pmConf = adapter->pm_conf;
-       memset(pmConf, 0, sizeof(*pmConf));
-
-       adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
-       adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
-                                                                 *pmConf));
-       adapter->shared->devRead.pmConfDesc.confPA =
-               cpu_to_le64(adapter->pm_conf_pa);
-
-       netif_device_attach(netdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        err = pci_enable_device_mem(pdev);
@@ -3315,15 +3307,31 @@ vmxnet3_resume(struct device *device)
 
        pci_enable_wake(pdev, PCI_D0, 0);
 
+       vmxnet3_alloc_intr_resources(adapter);
+
+       /* During hibernate and suspend, device has to be reinitialized as the
+        * device state need not be preserved.
+        */
+
+       /* Need not check adapter state as other reset tasks cannot run during
+        * device resume.
+        */
        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-                              VMXNET3_CMD_UPDATE_PMCFG);
+                              VMXNET3_CMD_QUIESCE_DEV);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
-       vmxnet3_alloc_intr_resources(adapter);
-       vmxnet3_request_irqs(adapter);
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               napi_enable(&adapter->rx_queue[i].napi);
-       vmxnet3_enable_all_intrs(adapter);
+       vmxnet3_tq_cleanup_all(adapter);
+       vmxnet3_rq_cleanup_all(adapter);
+
+       vmxnet3_reset_dev(adapter);
+       err = vmxnet3_activate_dev(adapter);
+       if (err != 0) {
+               netdev_err(netdev,
+                          "failed to re-activate on resume, error: %d", err);
+               vmxnet3_force_close(adapter);
+               return err;
+       }
+       netif_device_attach(netdev);
 
        return 0;
 }
@@ -3331,6 +3339,8 @@ vmxnet3_resume(struct device *device)
 static const struct dev_pm_ops vmxnet3_pm_ops = {
        .suspend = vmxnet3_suspend,
        .resume = vmxnet3_resume,
+       .freeze = vmxnet3_suspend,
+       .restore = vmxnet3_resume,
 };
 #endif
 
index b7b53329d5751676b6a9daf4c9cff9bb4ce923ad..4c8a944d58b41f0e42c1d9e34acb83a275c34b1d 100644 (file)
@@ -323,7 +323,7 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
                                          vmxnet3_tq_driver_stats[i].offset);
        }
 
-       for (j = 0; j < adapter->num_tx_queues; j++) {
+       for (j = 0; j < adapter->num_rx_queues; j++) {
                base = (u8 *)&adapter->rqd_start[j].stats;
                *buf++ = (u64) j;
                for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
@@ -447,12 +447,12 @@ vmxnet3_get_ringparam(struct net_device *netdev,
        param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
        param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
        param->rx_mini_max_pending = 0;
-       param->rx_jumbo_max_pending = 0;
+       param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
 
        param->rx_pending = adapter->rx_ring_size;
        param->tx_pending = adapter->tx_ring_size;
        param->rx_mini_pending = 0;
-       param->rx_jumbo_pending = 0;
+       param->rx_jumbo_pending = adapter->rx_ring2_size;
 }
 
 
@@ -461,7 +461,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                      struct ethtool_ringparam *param)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-       u32 new_tx_ring_size, new_rx_ring_size;
+       u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
        u32 sz;
        int err = 0;
 
@@ -473,6 +473,10 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                                                VMXNET3_RX_RING_MAX_SIZE)
                return -EINVAL;
 
+       if (param->rx_jumbo_pending == 0 ||
+           param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
+               return -EINVAL;
+
        /* if adapter not yet initialized, do nothing */
        if (adapter->rx_buf_per_pkt == 0) {
                netdev_err(netdev, "adapter not completely initialized, "
@@ -500,8 +504,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                                                           sz) != 0)
                return -EINVAL;
 
-       if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
-           new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
+       /* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
+       new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
+                               ~VMXNET3_RING_SIZE_MASK;
+       new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
+                                 VMXNET3_RX_RING2_MAX_SIZE);
+
+       if (new_tx_ring_size == adapter->tx_ring_size &&
+           new_rx_ring_size == adapter->rx_ring_size &&
+           new_rx_ring2_size == adapter->rx_ring2_size) {
                return 0;
        }
 
@@ -522,7 +533,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                vmxnet3_rq_destroy_all(adapter);
 
                err = vmxnet3_create_queues(adapter, new_tx_ring_size,
-                       new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
+                       new_rx_ring_size, new_rx_ring2_size);
 
                if (err) {
                        /* failed, most likely because of OOM, try default
@@ -530,11 +541,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                        netdev_err(netdev, "failed to apply new sizes, "
                                   "try the default ones\n");
                        new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+                       new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
                        new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
                        err = vmxnet3_create_queues(adapter,
                                                    new_tx_ring_size,
                                                    new_rx_ring_size,
-                                                   VMXNET3_DEF_RX_RING_SIZE);
+                                                   new_rx_ring2_size);
                        if (err) {
                                netdev_err(netdev, "failed to create queues "
                                           "with default sizes. Closing it\n");
@@ -549,6 +561,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
        }
        adapter->tx_ring_size = new_tx_ring_size;
        adapter->rx_ring_size = new_rx_ring_size;
+       adapter->rx_ring2_size = new_rx_ring2_size;
 
 out:
        clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
index 5f0199f6c31e6688c1a7e931fc90414c97907802..cd71c77f78f2f5aeaa7165cbe83e71c0e25587c3 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.2.1.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.3.4.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01020100
+#define VMXNET3_DRIVER_VERSION_NUM      0x01030400
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
@@ -352,6 +352,7 @@ struct vmxnet3_adapter {
        /* Ring sizes */
        u32 tx_ring_size;
        u32 rx_ring_size;
+       u32 rx_ring2_size;
 
        struct work_struct work;
 
@@ -384,6 +385,7 @@ struct vmxnet3_adapter {
 /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
 #define VMXNET3_DEF_TX_RING_SIZE    512
 #define VMXNET3_DEF_RX_RING_SIZE    256
+#define VMXNET3_DEF_RX_RING2_SIZE   128
 
 #define VMXNET3_MAX_ETH_HDR_SIZE    22
 #define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
index 2ab0922af0b430f0cf6e6e0b88099cffba3d698f..e6ed3e66964d6dbc7952831d3bebd949df66d70b 100644 (file)
 #define FDB_AGE_DEFAULT 300 /* 5 min */
 #define FDB_AGE_INTERVAL (10 * HZ)     /* rescan interval */
 
-#define VXLAN_N_VID    (1u << 24)
-#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
-
-#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
-
 /* UDP port for VXLAN traffic.
  * The IANA assigned port is 4789, but the Linux default is 8472
  * for compatibility with early adopters.
@@ -269,15 +263,20 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
        return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
 }
 
-/* Find VXLAN socket based on network namespace, address family and UDP port */
-static struct vxlan_sock *vxlan_find_sock(struct net *net,
-                                         sa_family_t family, __be16 port)
+/* Find VXLAN socket based on network namespace, address family and UDP port
+ * and enabled unshareable flags.
+ */
+static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
+                                         __be16 port, u32 flags)
 {
        struct vxlan_sock *vs;
 
+       flags &= VXLAN_F_RCV_FLAGS;
+
        hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
                if (inet_sk(vs->sock->sk)->inet_sport == port &&
-                   inet_sk(vs->sock->sk)->sk.sk_family == family)
+                   inet_sk(vs->sock->sk)->sk.sk_family == family &&
+                   vs->flags == flags)
                        return vs;
        }
        return NULL;
@@ -297,11 +296,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
 
 /* Look up VNI in a per net namespace table */
 static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
-                                       sa_family_t family, __be16 port)
+                                       sa_family_t family, __be16 port,
+                                       u32 flags)
 {
        struct vxlan_sock *vs;
 
-       vs = vxlan_find_sock(net, family, port);
+       vs = vxlan_find_sock(net, family, port, flags);
        if (!vs)
                return NULL;
 
@@ -340,6 +340,11 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
        ndm->ndm_flags = fdb->flags;
        ndm->ndm_type = RTN_UNICAST;
 
+       if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
+           nla_put_s32(skb, NDA_LINK_NETNSID,
+                       peernet2id(dev_net(vxlan->dev), vxlan->net)))
+               goto nla_put_failure;
+
        if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
                goto nla_put_failure;
 
@@ -364,7 +369,8 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
        if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -379,6 +385,7 @@ static inline size_t vxlan_nlmsg_size(void)
                + nla_total_size(sizeof(__be16)) /* NDA_PORT */
                + nla_total_size(sizeof(__be32)) /* NDA_VNI */
                + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
+               + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
                + nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
@@ -545,12 +552,51 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
        return 1;
 }
 
-static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
+                                         unsigned int off,
+                                         struct vxlanhdr *vh, size_t hdrlen,
+                                         u32 data)
+{
+       size_t start, offset, plen;
+
+       if (skb->remcsum_offload)
+               return vh;
+
+       if (!NAPI_GRO_CB(skb)->csum_valid)
+               return NULL;
+
+       start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+       offset = start + ((data & VXLAN_RCO_UDP) ?
+                         offsetof(struct udphdr, check) :
+                         offsetof(struct tcphdr, check));
+
+       plen = hdrlen + offset + sizeof(u16);
+
+       /* Pull checksum that will be written */
+       if (skb_gro_header_hard(skb, off + plen)) {
+               vh = skb_gro_header_slow(skb, off + plen, off);
+               if (!vh)
+                       return NULL;
+       }
+
+       skb_gro_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
+
+       skb->remcsum_offload = 1;
+
+       return vh;
+}
+
+static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
+                                         struct sk_buff *skb,
+                                         struct udp_offload *uoff)
 {
        struct sk_buff *p, **pp = NULL;
        struct vxlanhdr *vh, *vh2;
        unsigned int hlen, off_vx;
        int flush = 1;
+       struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
+                                            udp_offloads);
+       u32 flags;
 
        off_vx = skb_gro_offset(skb);
        hlen = off_vx + sizeof(*vh);
@@ -561,6 +607,19 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
                        goto out;
        }
 
+       skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+       skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
+
+       flags = ntohl(vh->vx_flags);
+
+       if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+               vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
+                                      ntohl(vh->vx_vni));
+
+               if (!vh)
+                       goto out;
+       }
+
        flush = 0;
 
        for (p = *head; p; p = p->next) {
@@ -568,14 +627,13 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
                        continue;
 
                vh2 = (struct vxlanhdr *)(p->data + off_vx);
-               if (vh->vx_vni != vh2->vx_vni) {
+               if (vh->vx_flags != vh2->vx_flags ||
+                   vh->vx_vni != vh2->vx_vni) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }
 
-       skb_gro_pull(skb, sizeof(struct vxlanhdr));
-       skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
        pp = eth_gro_receive(head, skb);
 
 out:
@@ -584,7 +642,8 @@ out:
        return pp;
 }
 
-static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
+static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
+                             struct udp_offload *uoff)
 {
        udp_tunnel_gro_complete(skb, nhoff);
 
@@ -1090,33 +1149,107 @@ static void vxlan_igmp_leave(struct work_struct *work)
        dev_put(vxlan->dev);
 }
 
+static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
+                                     size_t hdrlen, u32 data)
+{
+       size_t start, offset, plen;
+
+       if (skb->remcsum_offload) {
+               /* Already processed in GRO path */
+               skb->remcsum_offload = 0;
+               return vh;
+       }
+
+       start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
+       offset = start + ((data & VXLAN_RCO_UDP) ?
+                         offsetof(struct udphdr, check) :
+                         offsetof(struct tcphdr, check));
+
+       plen = hdrlen + offset + sizeof(u16);
+
+       if (!pskb_may_pull(skb, plen))
+               return NULL;
+
+       vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+
+       skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
+
+       return vh;
+}
+
 /* Callback from net/ipv4/udp.c to receive packets */
 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct vxlan_sock *vs;
        struct vxlanhdr *vxh;
+       u32 flags, vni;
+       struct vxlan_metadata md = {0};
 
        /* Need Vxlan and inner Ethernet header to be present */
        if (!pskb_may_pull(skb, VXLAN_HLEN))
                goto error;
 
-       /* Return packets with reserved bits set */
        vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
-       if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
-           (vxh->vx_vni & htonl(0xff))) {
-               netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
-                          ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
-               goto error;
+       flags = ntohl(vxh->vx_flags);
+       vni = ntohl(vxh->vx_vni);
+
+       if (flags & VXLAN_HF_VNI) {
+               flags &= ~VXLAN_HF_VNI;
+       } else {
+               /* VNI flag always required to be set */
+               goto bad_flags;
        }
 
        if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
                goto drop;
+       vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
 
        vs = rcu_dereference_sk_user_data(sk);
        if (!vs)
                goto drop;
 
-       vs->rcv(vs, skb, vxh->vx_vni);
+       if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+               vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni);
+               if (!vxh)
+                       goto drop;
+
+               flags &= ~VXLAN_HF_RCO;
+               vni &= VXLAN_VID_MASK;
+       }
+
+       /* For backwards compatibility, only allow reserved fields to be
+        * used by VXLAN extensions if explicitly requested.
+        */
+       if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
+               struct vxlanhdr_gbp *gbp;
+
+               gbp = (struct vxlanhdr_gbp *)vxh;
+               md.gbp = ntohs(gbp->policy_id);
+
+               if (gbp->dont_learn)
+                       md.gbp |= VXLAN_GBP_DONT_LEARN;
+
+               if (gbp->policy_applied)
+                       md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+
+               flags &= ~VXLAN_GBP_USED_BITS;
+       }
+
+       if (flags || (vni & ~VXLAN_VID_MASK)) {
+               /* If there are any unprocessed flags remaining treat
+                * this as a malformed packet. This behavior diverges from
+                * VXLAN RFC (RFC7348) which stipulates that bits in reserved
+                * in reserved fields are to be ignored. The approach here
+                * maintains compatbility with previous stack code, and also
+                * is more robust and provides a little more security in
+                * adding extensions to VXLAN.
+                */
+
+               goto bad_flags;
+       }
+
+       md.vni = vxh->vx_vni;
+       vs->rcv(vs, skb, &md);
        return 0;
 
 drop:
@@ -1124,13 +1257,17 @@ drop:
        kfree_skb(skb);
        return 0;
 
+bad_flags:
+       netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+                  ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+
 error:
        /* Return non vxlan pkt */
        return 1;
 }
 
-static void vxlan_rcv(struct vxlan_sock *vs,
-                     struct sk_buff *skb, __be32 vx_vni)
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+                     struct vxlan_metadata *md)
 {
        struct iphdr *oip = NULL;
        struct ipv6hdr *oip6 = NULL;
@@ -1141,7 +1278,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
        int err = 0;
        union vxlan_addr *remote_ip;
 
-       vni = ntohl(vx_vni) >> 8;
+       vni = ntohl(md->vni) >> 8;
        /* Is this VNI defined? */
        vxlan = vxlan_vs_find_vni(vs, vni);
        if (!vxlan)
@@ -1175,6 +1312,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
                goto drop;
 
        skb_reset_network_header(skb);
+       skb->mark = md->gbp;
 
        if (oip6)
                err = IP6_ECN_decapsulate(oip6, skb);
@@ -1524,20 +1662,54 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
        return false;
 }
 
+static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
+                               struct vxlan_metadata *md)
+{
+       struct vxlanhdr_gbp *gbp;
+
+       if (!md->gbp)
+               return;
+
+       gbp = (struct vxlanhdr_gbp *)vxh;
+       vxh->vx_flags |= htonl(VXLAN_HF_GBP);
+
+       if (md->gbp & VXLAN_GBP_DONT_LEARN)
+               gbp->dont_learn = 1;
+
+       if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
+               gbp->policy_applied = 1;
+
+       gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
-static int vxlan6_xmit_skb(struct vxlan_sock *vs,
-                          struct dst_entry *dst, struct sk_buff *skb,
+static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
                           struct net_device *dev, struct in6_addr *saddr,
                           struct in6_addr *daddr, __u8 prio, __u8 ttl,
-                          __be16 src_port, __be16 dst_port, __be32 vni,
-                          bool xnet)
+                          __be16 src_port, __be16 dst_port,
+                          struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
        struct vxlanhdr *vxh;
        int min_headroom;
        int err;
-       bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
+       bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+       int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+       u16 hdrlen = sizeof(struct vxlanhdr);
+
+       if ((vxflags & VXLAN_F_REMCSUM_TX) &&
+           skb->ip_summed == CHECKSUM_PARTIAL) {
+               int csum_start = skb_checksum_start_offset(skb);
+
+               if (csum_start <= VXLAN_MAX_REMCSUM_START &&
+                   !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
+                   (skb->csum_offset == offsetof(struct udphdr, check) ||
+                    skb->csum_offset == offsetof(struct tcphdr, check))) {
+                       udp_sum = false;
+                       type |= SKB_GSO_TUNNEL_REMCSUM;
+               }
+       }
 
-       skb = udp_tunnel_handle_offloads(skb, udp_sum);
+       skb = iptunnel_handle_offloads(skb, udp_sum, type);
        if (IS_ERR(skb)) {
                err = -EINVAL;
                goto err;
@@ -1547,7 +1719,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
                        + VXLAN_HLEN + sizeof(struct ipv6hdr)
-                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        /* Need space for new headers (invalidates iph ptr) */
        err = skb_cow_head(skb, min_headroom);
@@ -1563,13 +1735,33 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
        }
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
-       vxh->vx_flags = htonl(VXLAN_FLAGS);
-       vxh->vx_vni = vni;
+       vxh->vx_flags = htonl(VXLAN_HF_VNI);
+       vxh->vx_vni = md->vni;
+
+       if (type & SKB_GSO_TUNNEL_REMCSUM) {
+               u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
+                          VXLAN_RCO_SHIFT;
+
+               if (skb->csum_offset == offsetof(struct udphdr, check))
+                       data |= VXLAN_RCO_UDP;
+
+               vxh->vx_vni |= htonl(data);
+               vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+
+               if (!skb_is_gso(skb)) {
+                       skb->ip_summed = CHECKSUM_NONE;
+                       skb->encapsulation = 0;
+               }
+       }
+
+       if (vxflags & VXLAN_F_GBP)
+               vxlan_build_gbp_hdr(vxh, vxflags, md);
 
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 
-       udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
-                            ttl, src_port, dst_port);
+       udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio,
+                            ttl, src_port, dst_port,
+                            !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
        return 0;
 err:
        dst_release(dst);
@@ -1577,23 +1769,38 @@ err:
 }
 #endif
 
-int vxlan_xmit_skb(struct vxlan_sock *vs,
-                  struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
+                  __be16 src_port, __be16 dst_port,
+                  struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
        struct vxlanhdr *vxh;
        int min_headroom;
        int err;
-       bool udp_sum = !vs->sock->sk->sk_no_check_tx;
+       bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
+       int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+       u16 hdrlen = sizeof(struct vxlanhdr);
+
+       if ((vxflags & VXLAN_F_REMCSUM_TX) &&
+           skb->ip_summed == CHECKSUM_PARTIAL) {
+               int csum_start = skb_checksum_start_offset(skb);
+
+               if (csum_start <= VXLAN_MAX_REMCSUM_START &&
+                   !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
+                   (skb->csum_offset == offsetof(struct udphdr, check) ||
+                    skb->csum_offset == offsetof(struct tcphdr, check))) {
+                       udp_sum = false;
+                       type |= SKB_GSO_TUNNEL_REMCSUM;
+               }
+       }
 
-       skb = udp_tunnel_handle_offloads(skb, udp_sum);
+       skb = iptunnel_handle_offloads(skb, udp_sum, type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + VXLAN_HLEN + sizeof(struct iphdr)
-                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        /* Need space for new headers (invalidates iph ptr) */
        err = skb_cow_head(skb, min_headroom);
@@ -1607,13 +1814,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                return -ENOMEM;
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
-       vxh->vx_flags = htonl(VXLAN_FLAGS);
-       vxh->vx_vni = vni;
+       vxh->vx_flags = htonl(VXLAN_HF_VNI);
+       vxh->vx_vni = md->vni;
+
+       if (type & SKB_GSO_TUNNEL_REMCSUM) {
+               u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
+                          VXLAN_RCO_SHIFT;
+
+               if (skb->csum_offset == offsetof(struct udphdr, check))
+                       data |= VXLAN_RCO_UDP;
+
+               vxh->vx_vni |= htonl(data);
+               vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+
+               if (!skb_is_gso(skb)) {
+                       skb->ip_summed = CHECKSUM_NONE;
+                       skb->encapsulation = 0;
+               }
+       }
+
+       if (vxflags & VXLAN_F_GBP)
+               vxlan_build_gbp_hdr(vxh, vxflags, md);
 
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 
-       return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos,
-                                  ttl, df, src_port, dst_port, xnet);
+       return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
+                                  ttl, df, src_port, dst_port, xnet,
+                                  !(vxflags & VXLAN_F_UDP_CSUM));
 }
 EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 
@@ -1670,6 +1897,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        const struct iphdr *old_iph;
        struct flowi4 fl4;
        union vxlan_addr *dst;
+       struct vxlan_metadata md;
        __be16 src_port = 0, dst_port;
        u32 vni;
        __be16 df = 0;
@@ -1731,7 +1959,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                        ip_rt_put(rt);
                        dst_vxlan = vxlan_find_vni(vxlan->net, vni,
-                                                  dst->sa.sa_family, dst_port);
+                                                  dst->sa.sa_family, dst_port,
+                                                  vxlan->flags);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1740,12 +1969,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-
-               err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
-                                    fl4.saddr, dst->sin.sin_addr.s_addr,
-                                    tos, ttl, df, src_port, dst_port,
-                                    htonl(vni << 8),
-                                    !net_eq(vxlan->net, dev_net(vxlan->dev)));
+               md.vni = htonl(vni << 8);
+               md.gbp = skb->mark;
+
+               err = vxlan_xmit_skb(rt, skb, fl4.saddr,
+                                    dst->sin.sin_addr.s_addr, tos, ttl, df,
+                                    src_port, dst_port, &md,
+                                    !net_eq(vxlan->net, dev_net(vxlan->dev)),
+                                    vxlan->flags);
                if (err < 0) {
                        /* skb is already freed. */
                        skb = NULL;
@@ -1789,7 +2020,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                        dst_release(ndst);
                        dst_vxlan = vxlan_find_vni(vxlan->net, vni,
-                                                  dst->sa.sa_family, dst_port);
+                                                  dst->sa.sa_family, dst_port,
+                                                  vxlan->flags);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1797,11 +2029,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
+               md.vni = htonl(vni << 8);
+               md.gbp = skb->mark;
 
-               err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
-                                     dev, &fl6.saddr, &fl6.daddr, 0, ttl,
-                                     src_port, dst_port, htonl(vni << 8),
-                                     !net_eq(vxlan->net, dev_net(vxlan->dev)));
+               err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr,
+                                     0, ttl, src_port, dst_port, &md,
+                                     !net_eq(vxlan->net, dev_net(vxlan->dev)),
+                                     vxlan->flags);
 #endif
        }
 
@@ -1957,7 +2191,7 @@ static int vxlan_init(struct net_device *dev)
 
        spin_lock(&vn->sock_lock);
        vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
-                            vxlan->dst_port);
+                            vxlan->dst_port, vxlan->flags);
        if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
                /* If we have a socket with same port already, reuse it */
                vxlan_vs_add_dev(vs, vxlan);
@@ -2201,6 +2435,9 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
        [IFLA_VXLAN_UDP_CSUM]   = { .type = NLA_U8 },
        [IFLA_VXLAN_UDP_ZERO_CSUM6_TX]  = { .type = NLA_U8 },
        [IFLA_VXLAN_UDP_ZERO_CSUM6_RX]  = { .type = NLA_U8 },
+       [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
+       [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
+       [IFLA_VXLAN_GBP]        = { .type = NLA_FLAG, },
 };
 
 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -2270,15 +2507,11 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
 
        if (ipv6) {
                udp_conf.family = AF_INET6;
-               udp_conf.use_udp6_tx_checksums =
-                   !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
                udp_conf.use_udp6_rx_checksums =
                    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
        } else {
                udp_conf.family = AF_INET;
                udp_conf.local_ip.s_addr = INADDR_ANY;
-               udp_conf.use_udp_checksums =
-                   !!(flags & VXLAN_F_UDP_CSUM);
        }
 
        udp_conf.local_udp_port = port;
@@ -2322,6 +2555,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
        atomic_set(&vs->refcnt, 1);
        vs->rcv = rcv;
        vs->data = data;
+       vs->flags = (flags & VXLAN_F_RCV_FLAGS);
 
        /* Initialize the vxlan udp offloads structure */
        vs->udp_offloads.port = port;
@@ -2360,7 +2594,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                return vs;
 
        spin_lock(&vn->sock_lock);
-       vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
+       vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags);
        if (vs && ((vs->rcv != rcv) ||
                   !atomic_add_unless(&vs->refcnt, 1, 0)))
                        vs = ERR_PTR(-EBUSY);
@@ -2391,10 +2625,10 @@ static void vxlan_sock_work(struct work_struct *work)
        dev_put(vxlan->dev);
 }
 
-static int vxlan_newlink(struct net *net, struct net_device *dev,
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
-       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_rdst *dst = &vxlan->default_dst;
        __u32 vni;
@@ -2404,7 +2638,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
        if (!data[IFLA_VXLAN_ID])
                return -EINVAL;
 
-       vxlan->net = dev_net(dev);
+       vxlan->net = src_net;
 
        vni = nla_get_u32(data[IFLA_VXLAN_ID]);
        dst->remote_vni = vni;
@@ -2440,7 +2674,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
        if (data[IFLA_VXLAN_LINK] &&
            (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
                struct net_device *lowerdev
-                        = __dev_get_by_index(net, dst->remote_ifindex);
+                        = __dev_get_by_index(src_net, dst->remote_ifindex);
 
                if (!lowerdev) {
                        pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2516,8 +2750,19 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
                vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
-       if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
-                          vxlan->dst_port)) {
+       if (data[IFLA_VXLAN_REMCSUM_TX] &&
+           nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
+               vxlan->flags |= VXLAN_F_REMCSUM_TX;
+
+       if (data[IFLA_VXLAN_REMCSUM_RX] &&
+           nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
+               vxlan->flags |= VXLAN_F_REMCSUM_RX;
+
+       if (data[IFLA_VXLAN_GBP])
+               vxlan->flags |= VXLAN_F_GBP;
+
+       if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+                          vxlan->dst_port, vxlan->flags)) {
                pr_info("duplicate VNI %u\n", vni);
                return -EEXIST;
        }
@@ -2584,6 +2829,8 @@ static size_t vxlan_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
                nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
                nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
+               nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
+               nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
                0;
 }
 
@@ -2649,18 +2896,33 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
                        !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
            nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
-                       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
+                       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+           nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
+                       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
+           nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
+                       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
                goto nla_put_failure;
 
        if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
                goto nla_put_failure;
 
+       if (vxlan->flags & VXLAN_F_GBP &&
+           nla_put_flag(skb, IFLA_VXLAN_GBP))
+               goto nla_put_failure;
+
        return 0;
 
 nla_put_failure:
        return -EMSGSIZE;
 }
 
+static struct net *vxlan_get_link_net(const struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       return vxlan->net;
+}
+
 static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
        .kind           = "vxlan",
        .maxtype        = IFLA_VXLAN_MAX,
@@ -2672,6 +2934,7 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
        .dellink        = vxlan_dellink,
        .get_size       = vxlan_get_size,
        .fill_info      = vxlan_fill_info,
+       .get_link_net   = vxlan_get_link_net,
 };
 
 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
index 94e234975c6114d0e46fde4c71c34625596fe6f4..a2fdd15f285a2367a5b25c3f10e69a1327348ed1 100644 (file)
@@ -25,7 +25,7 @@ if WAN
 # There is no way to detect a comtrol sv11 - force it modular for now.
 config HOSTESS_SV11
        tristate "Comtrol Hostess SV-11 support"
-       depends on ISA && m && ISA_DMA_API && INET && HDLC
+       depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
        help
          Driver for Comtrol Hostess SV-11 network card which
          operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
 # The COSA/SRP driver has not been tested as non-modular yet.
 config COSA
        tristate "COSA/SRP sync serial boards support"
-       depends on ISA && m && ISA_DMA_API && HDLC
+       depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
        ---help---
          Driver for COSA and SRP synchronous serial boards.
 
@@ -87,7 +87,7 @@ config LANMEDIA
 # There is no way to detect a Sealevel board. Force it modular
 config SEALEVEL_4021
        tristate "Sealevel Systems 4021 support"
-       depends on ISA && m && ISA_DMA_API && INET && HDLC
+       depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
        help
          This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
 
index 1cebd32190686411fb88f53c61448a74ba089a77..d6d2f0f00caad18ec00ba69f5635c79f161b2ba7 100644 (file)
@@ -76,6 +76,9 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
                if (def_idx)
                        arg.key_flags |= WMI_KEY_TX_USAGE;
                break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               /* this one needs to be done in software */
+               return 1;
        default:
                ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
                return -EOPNOTSUPP;
@@ -5268,6 +5271,13 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
 
 int ath10k_mac_register(struct ath10k *ar)
 {
+       static const u32 cipher_suites[] = {
+               WLAN_CIPHER_SUITE_WEP40,
+               WLAN_CIPHER_SUITE_WEP104,
+               WLAN_CIPHER_SUITE_TKIP,
+               WLAN_CIPHER_SUITE_CCMP,
+               WLAN_CIPHER_SUITE_AES_CMAC,
+       };
        struct ieee80211_supported_band *band;
        struct ieee80211_sta_vht_cap vht_cap;
        struct ieee80211_sta_ht_cap ht_cap;
@@ -5342,7 +5352,8 @@ int ath10k_mac_register(struct ath10k *ar)
                        IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                        IEEE80211_HW_HAS_RATE_CONTROL |
                        IEEE80211_HW_AP_LINK_PS |
-                       IEEE80211_HW_SPECTRUM_MGMT;
+                       IEEE80211_HW_SPECTRUM_MGMT |
+                       IEEE80211_HW_SW_CRYPTO_CONTROL;
 
        ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
 
@@ -5429,6 +5440,9 @@ int ath10k_mac_register(struct ath10k *ar)
                goto err_free;
        }
 
+       ar->hw->wiphy->cipher_suites = cipher_suites;
+       ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
        ret = ieee80211_register_hw(ar->hw);
        if (ret) {
                ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
index 98b1e4aa150658adb06577b4d6bee13b7eaf3f08..9ede991b8d767cfd2268a9137dcaa57d171af174 100644 (file)
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 
        __ath_cancel_work(sc);
 
+       disable_irq(sc->irq);
        tasklet_disable(&sc->intr_tq);
        tasklet_disable(&sc->bcon_tasklet);
        spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
                r = -EIO;
 
 out:
+       enable_irq(sc->irq);
        spin_unlock_bh(&sc->sc_pcu_lock);
        tasklet_enable(&sc->bcon_tasklet);
        tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
        if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
                return IRQ_NONE;
 
-       if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
-               return IRQ_NONE;
-
        /* shared irq, not for us */
        if (!ath9k_hw_intrpend(ah))
                return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
        ath9k_debug_sync_cause(sc, sync_cause);
        status &= ah->imask;    /* discard unasked-for bits */
 
-       if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+       if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
                return IRQ_HANDLED;
 
        /*
index 494e7335aa6461bf2b67ba4a3624b639d480d100..4a4c6586a8d2dcda2b6f49a5b767bcc48304e138 100644 (file)
@@ -2557,7 +2557,8 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb,
        if (res < 0)
                goto out_err;
 
-       return genlmsg_end(skb, hdr);
+       genlmsg_end(skb, hdr);
+       return 0;
 
 out_err:
        genlmsg_cancel(skb, hdr);
index 88331d729b0e798d1ae6cf4c2047f23effeefb0a..ec456f0d972eb583f4f2bbfda84472680b2513f0 100644 (file)
@@ -672,7 +672,8 @@ tx_status_ok:
 }
 
 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
-                                   u8 *entry, int rxring_idx, int desc_idx)
+                                   struct sk_buff *new_skb, u8 *entry,
+                                   int rxring_idx, int desc_idx)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -680,11 +681,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
        u8 tmp_one = 1;
        struct sk_buff *skb;
 
+       if (likely(new_skb)) {
+               skb = new_skb;
+               goto remap;
+       }
        skb = dev_alloc_skb(rtlpci->rxbuffersize);
        if (!skb)
                return 0;
-       rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
 
+remap:
        /* just set skb->cb to mapping addr for pci_unmap_single use */
        *((dma_addr_t *)skb->cb) =
                pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@@ -692,6 +697,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
        bufferaddress = *((dma_addr_t *)skb->cb);
        if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
                return 0;
+       rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
        if (rtlpriv->use_new_trx_flow) {
                rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
                                            HW_DESC_RX_PREPARE,
@@ -787,6 +793,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                /*rx pkt */
                struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
                                      rtlpci->rx_ring[rxring_idx].idx];
+               struct sk_buff *new_skb;
 
                if (rtlpriv->use_new_trx_flow) {
                        rx_remained_cnt =
@@ -813,6 +820,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
                                 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
 
+               /* get a new skb - if fail, old one will be reused */
+               new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+               if (unlikely(!new_skb)) {
+                       pr_err("Allocation of new skb failed in %s\n",
+                              __func__);
+                       goto no_new;
+               }
                if (rtlpriv->use_new_trx_flow) {
                        buffer_desc =
                          &rtlpci->rx_ring[rxring_idx].buffer_desc
@@ -917,14 +931,16 @@ new_trx_end:
                        rtlpriv->enter_ps = false;
                        schedule_work(&rtlpriv->works.lps_change_work);
                }
+               skb = new_skb;
+no_new:
                if (rtlpriv->use_new_trx_flow) {
-                       _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+                       _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
                                                 rxring_idx,
-                                              rtlpci->rx_ring[rxring_idx].idx);
+                                                rtlpci->rx_ring[rxring_idx].idx);
                } else {
-                       _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+                       _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+                                                rxring_idx,
                                                 rtlpci->rx_ring[rxring_idx].idx);
-
                        if (rtlpci->rx_ring[rxring_idx].idx ==
                            rtlpci->rxringcount - 1)
                                rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@@ -1313,7 +1329,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
                rtlpci->rx_ring[rxring_idx].idx = 0;
                for (i = 0; i < rtlpci->rxringcount; i++) {
                        entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
-                       if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+                       if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
                                                      rxring_idx, i))
                                return -ENOMEM;
                }
@@ -1338,7 +1354,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
 
                for (i = 0; i < rtlpci->rxringcount; i++) {
                        entry = &rtlpci->rx_ring[rxring_idx].desc[i];
-                       if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+                       if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
                                                      rxring_idx, i))
                                return -ENOMEM;
                }
index 5f1fda44882b764d2adcdc4d02a782740db6cbc5..589fa256256b8ca423bf424544056557c1c7e0ce 100644 (file)
@@ -251,7 +251,6 @@ struct xenvif {
 struct xenvif_rx_cb {
        unsigned long expires;
        int meta_slots_used;
-       bool full_coalesce;
 };
 
 #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
index 9259a732e8a4a6d20740a1ad9170962040b8d1f8..2b2484b4cc29047e0564ed464ab919da892f28b9 100644 (file)
@@ -80,7 +80,7 @@ static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-int xenvif_poll(struct napi_struct *napi, int budget)
+static int xenvif_poll(struct napi_struct *napi, int budget)
 {
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
@@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                goto err_rx_unbind;
        }
        queue->task = task;
+       get_task_struct(task);
 
        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
@@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif)
 
                if (queue->task) {
                        kthread_stop(queue->task);
+                       put_task_struct(queue->task);
                        queue->task = NULL;
                }
 
index 908e65e9b8219783ae4b99e5d06e4701478c830d..13899d5099e597b91f46dbfcedbcdf023fb80796 100644 (file)
@@ -233,51 +233,6 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
        }
 }
 
-/*
- * Returns true if we should start a new receive buffer instead of
- * adding 'size' bytes to a buffer which currently contains 'offset'
- * bytes.
- */
-static bool start_new_rx_buffer(int offset, unsigned long size, int head,
-                               bool full_coalesce)
-{
-       /* simple case: we have completely filled the current buffer. */
-       if (offset == MAX_BUFFER_OFFSET)
-               return true;
-
-       /*
-        * complex case: start a fresh buffer if the current frag
-        * would overflow the current buffer but only if:
-        *     (i)   this frag would fit completely in the next buffer
-        * and (ii)  there is already some data in the current buffer
-        * and (iii) this is not the head buffer.
-        * and (iv)  there is no need to fully utilize the buffers
-        *
-        * Where:
-        * - (i) stops us splitting a frag into two copies
-        *   unless the frag is too large for a single buffer.
-        * - (ii) stops us from leaving a buffer pointlessly empty.
-        * - (iii) stops us leaving the first buffer
-        *   empty. Strictly speaking this is already covered
-        *   by (ii) but is explicitly checked because
-        *   netfront relies on the first buffer being
-        *   non-empty and can crash otherwise.
-        * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
-        *   slot
-        *
-        * This means we will effectively linearise small
-        * frags but do not needlessly split large buffers
-        * into multiple copies tend to give large frags their
-        * own buffers as before.
-        */
-       BUG_ON(size > MAX_BUFFER_OFFSET);
-       if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
-           !full_coalesce)
-               return true;
-
-       return false;
-}
-
 struct netrx_pending_operations {
        unsigned copy_prod, copy_cons;
        unsigned meta_prod, meta_cons;
@@ -336,24 +291,13 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                BUG_ON(offset >= PAGE_SIZE);
                BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 
-               bytes = PAGE_SIZE - offset;
+               if (npo->copy_off == MAX_BUFFER_OFFSET)
+                       meta = get_next_rx_buffer(queue, npo);
 
+               bytes = PAGE_SIZE - offset;
                if (bytes > size)
                        bytes = size;
 
-               if (start_new_rx_buffer(npo->copy_off,
-                                       bytes,
-                                       *head,
-                                       XENVIF_RX_CB(skb)->full_coalesce)) {
-                       /*
-                        * Netfront requires there to be some data in the head
-                        * buffer.
-                        */
-                       BUG_ON(*head);
-
-                       meta = get_next_rx_buffer(queue, npo);
-               }
-
                if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
                        bytes = MAX_BUFFER_OFFSET - npo->copy_off;
 
@@ -652,60 +596,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
        while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
               && (skb = xenvif_rx_dequeue(queue)) != NULL) {
-               RING_IDX max_slots_needed;
                RING_IDX old_req_cons;
                RING_IDX ring_slots_used;
-               int i;
 
                queue->last_rx_time = jiffies;
 
-               /* We need a cheap worse case estimate for the number of
-                * slots we'll use.
-                */
-
-               max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
-                                               skb_headlen(skb),
-                                               PAGE_SIZE);
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       unsigned int size;
-                       unsigned int offset;
-
-                       size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-                       offset = skb_shinfo(skb)->frags[i].page_offset;
-
-                       /* For a worse-case estimate we need to factor in
-                        * the fragment page offset as this will affect the
-                        * number of times xenvif_gop_frag_copy() will
-                        * call start_new_rx_buffer().
-                        */
-                       max_slots_needed += DIV_ROUND_UP(offset + size,
-                                                        PAGE_SIZE);
-               }
-
-               /* To avoid the estimate becoming too pessimal for some
-                * frontends that limit posted rx requests, cap the estimate
-                * at MAX_SKB_FRAGS. In this case netback will fully coalesce
-                * the skb into the provided slots.
-                */
-               if (max_slots_needed > MAX_SKB_FRAGS) {
-                       max_slots_needed = MAX_SKB_FRAGS;
-                       XENVIF_RX_CB(skb)->full_coalesce = true;
-               } else {
-                       XENVIF_RX_CB(skb)->full_coalesce = false;
-               }
-
-               /* We may need one more slot for GSO metadata */
-               if (skb_is_gso(skb) &&
-                  (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
-                   skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
-                       max_slots_needed++;
-
                old_req_cons = queue->rx.req_cons;
                XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
                ring_slots_used = queue->rx.req_cons - old_req_cons;
 
-               BUG_ON(ring_slots_used > max_slots_needed);
-
                __skb_queue_tail(&rxq, skb);
        }
 
@@ -2109,8 +2008,7 @@ int xenvif_kthread_guest_rx(void *data)
                 */
                if (unlikely(vif->disabled && queue->id == 0)) {
                        xenvif_carrier_off(vif);
-                       xenvif_rx_queue_purge(queue);
-                       continue;
+                       break;
                }
 
                if (!skb_queue_empty(&queue->rx_queue))
index efbaf2ae1999a97982a8e57274e3cf09f95b42ec..794204e34fba4fb74d1c21f2fef0837ef340feaa 100644 (file)
@@ -737,6 +737,7 @@ static void connect(struct backend_info *be)
                }
 
                queue->remaining_credit = credit_bytes;
+               queue->credit_usec = credit_usec;
 
                err = connect_rings(be, queue);
                if (err) {
index 22bcb4e12e2a1318fc1802fb3c5ff6b2cb4acf92..e9b960f0ff32c8af2ff404a138780ff751bf4572 100644 (file)
@@ -88,10 +88,8 @@ struct netfront_cb {
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
 struct netfront_stats {
-       u64                     rx_packets;
-       u64                     tx_packets;
-       u64                     rx_bytes;
-       u64                     tx_bytes;
+       u64                     packets;
+       u64                     bytes;
        struct u64_stats_sync   syncp;
 };
 
@@ -144,10 +142,6 @@ struct netfront_queue {
        struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
-
-       unsigned long rx_pfn_array[NET_RX_RING_SIZE];
-       struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
-       struct mmu_update rx_mmu[NET_RX_RING_SIZE];
 };
 
 struct netfront_info {
@@ -160,7 +154,8 @@ struct netfront_info {
        struct netfront_queue *queues;
 
        /* Statistics */
-       struct netfront_stats __percpu *stats;
+       struct netfront_stats __percpu *rx_stats;
+       struct netfront_stats __percpu *tx_stats;
 
        atomic_t rx_gso_checksum_fixup;
 };
@@ -224,11 +219,7 @@ static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 }
 
 #ifdef CONFIG_SYSFS
-static int xennet_sysfs_addif(struct net_device *netdev);
-static void xennet_sysfs_delif(struct net_device *netdev);
-#else /* !CONFIG_SYSFS */
-#define xennet_sysfs_addif(dev) (0)
-#define xennet_sysfs_delif(dev) do { } while (0)
+static const struct attribute_group xennet_dev_group;
 #endif
 
 static bool xennet_can_sg(struct net_device *dev)
@@ -425,109 +416,68 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
        xennet_maybe_wake_tx(queue);
 }
 
-static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
-                             struct xen_netif_tx_request *tx)
+static struct xen_netif_tx_request *xennet_make_one_txreq(
+       struct netfront_queue *queue, struct sk_buff *skb,
+       struct page *page, unsigned int offset, unsigned int len)
 {
-       char *data = skb->data;
-       unsigned long mfn;
-       RING_IDX prod = queue->tx.req_prod_pvt;
-       int frags = skb_shinfo(skb)->nr_frags;
-       unsigned int offset = offset_in_page(data);
-       unsigned int len = skb_headlen(skb);
        unsigned int id;
+       struct xen_netif_tx_request *tx;
        grant_ref_t ref;
-       int i;
 
-       /* While the header overlaps a page boundary (including being
-          larger than a page), split it it into page-sized chunks. */
-       while (len > PAGE_SIZE - offset) {
-               tx->size = PAGE_SIZE - offset;
-               tx->flags |= XEN_NETTXF_more_data;
-               len -= tx->size;
-               data += tx->size;
-               offset = 0;
+       len = min_t(unsigned int, PAGE_SIZE - offset, len);
 
-               id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-               queue->tx_skbs[id].skb = skb_get(skb);
-               tx = RING_GET_REQUEST(&queue->tx, prod++);
-               tx->id = id;
-               ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-               BUG_ON((signed short)ref < 0);
+       id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+       tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+       ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+       BUG_ON((signed short)ref < 0);
 
-               mfn = virt_to_mfn(data);
-               gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-                                               mfn, GNTMAP_readonly);
+       gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+                                       page_to_mfn(page), GNTMAP_readonly);
 
-               queue->grant_tx_page[id] = virt_to_page(data);
-               tx->gref = queue->grant_tx_ref[id] = ref;
-               tx->offset = offset;
-               tx->size = len;
-               tx->flags = 0;
-       }
+       queue->tx_skbs[id].skb = skb;
+       queue->grant_tx_page[id] = page;
+       queue->grant_tx_ref[id] = ref;
 
-       /* Grant backend access to each skb fragment page. */
-       for (i = 0; i < frags; i++) {
-               skb_frag_t *frag = skb_shinfo(skb)->frags + i;
-               struct page *page = skb_frag_page(frag);
+       tx->id = id;
+       tx->gref = ref;
+       tx->offset = offset;
+       tx->size = len;
+       tx->flags = 0;
 
-               len = skb_frag_size(frag);
-               offset = frag->page_offset;
+       return tx;
+}
 
-               /* Skip unused frames from start of page */
-               page += offset >> PAGE_SHIFT;
-               offset &= ~PAGE_MASK;
+static struct xen_netif_tx_request *xennet_make_txreqs(
+       struct netfront_queue *queue, struct xen_netif_tx_request *tx,
+       struct sk_buff *skb, struct page *page,
+       unsigned int offset, unsigned int len)
+{
+       /* Skip unused frames from start of page */
+       page += offset >> PAGE_SHIFT;
+       offset &= ~PAGE_MASK;
 
-               while (len > 0) {
-                       unsigned long bytes;
-
-                       bytes = PAGE_SIZE - offset;
-                       if (bytes > len)
-                               bytes = len;
-
-                       tx->flags |= XEN_NETTXF_more_data;
-
-                       id = get_id_from_freelist(&queue->tx_skb_freelist,
-                                                 queue->tx_skbs);
-                       queue->tx_skbs[id].skb = skb_get(skb);
-                       tx = RING_GET_REQUEST(&queue->tx, prod++);
-                       tx->id = id;
-                       ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-                       BUG_ON((signed short)ref < 0);
-
-                       mfn = pfn_to_mfn(page_to_pfn(page));
-                       gnttab_grant_foreign_access_ref(ref,
-                                                       queue->info->xbdev->otherend_id,
-                                                       mfn, GNTMAP_readonly);
-
-                       queue->grant_tx_page[id] = page;
-                       tx->gref = queue->grant_tx_ref[id] = ref;
-                       tx->offset = offset;
-                       tx->size = bytes;
-                       tx->flags = 0;
-
-                       offset += bytes;
-                       len -= bytes;
-
-                       /* Next frame */
-                       if (offset == PAGE_SIZE && len) {
-                               BUG_ON(!PageCompound(page));
-                               page++;
-                               offset = 0;
-                       }
-               }
+       while (len) {
+               tx->flags |= XEN_NETTXF_more_data;
+               tx = xennet_make_one_txreq(queue, skb_get(skb),
+                                          page, offset, len);
+               page++;
+               offset = 0;
+               len -= tx->size;
        }
 
-       queue->tx.req_prod_pvt = prod;
+       return tx;
 }
 
 /*
- * Count how many ring slots are required to send the frags of this
- * skb. Each frag might be a compound page.
+ * Count how many ring slots are required to send this skb. Each frag
+ * might be a compound page.
  */
-static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+static int xennet_count_skb_slots(struct sk_buff *skb)
 {
        int i, frags = skb_shinfo(skb)->nr_frags;
-       int pages = 0;
+       int pages;
+
+       pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
 
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
@@ -563,18 +513,15 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
-       struct netfront_stats *stats = this_cpu_ptr(np->stats);
-       struct xen_netif_tx_request *tx;
-       char *data = skb->data;
-       RING_IDX i;
-       grant_ref_t ref;
-       unsigned long mfn;
+       struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+       struct xen_netif_tx_request *tx, *first_tx;
+       unsigned int i;
        int notify;
        int slots;
-       unsigned int offset = offset_in_page(data);
-       unsigned int len = skb_headlen(skb);
+       struct page *page;
+       unsigned int offset;
+       unsigned int len;
        unsigned long flags;
        struct netfront_queue *queue = NULL;
        unsigned int num_queues = dev->real_num_tx_queues;
@@ -597,18 +544,18 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                goto drop;
        }
 
-       slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
-               xennet_count_skb_frag_slots(skb);
+       slots = xennet_count_skb_slots(skb);
        if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
                net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
                                    slots, skb->len);
                if (skb_linearize(skb))
                        goto drop;
-               data = skb->data;
-               offset = offset_in_page(data);
-               len = skb_headlen(skb);
        }
 
+       page = virt_to_page(skb->data);
+       offset = offset_in_page(skb->data);
+       len = skb_headlen(skb);
+
        spin_lock_irqsave(&queue->tx_lock, flags);
 
        if (unlikely(!netif_carrier_ok(dev) ||
@@ -618,25 +565,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                goto drop;
        }
 
-       i = queue->tx.req_prod_pvt;
-
-       id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-       queue->tx_skbs[id].skb = skb;
-
-       tx = RING_GET_REQUEST(&queue->tx, i);
-
-       tx->id   = id;
-       ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-       BUG_ON((signed short)ref < 0);
-       mfn = virt_to_mfn(data);
-       gnttab_grant_foreign_access_ref(
-               ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
-       queue->grant_tx_page[id] = virt_to_page(data);
-       tx->gref = queue->grant_tx_ref[id] = ref;
-       tx->offset = offset;
-       tx->size = len;
+       /* First request for the linear area. */
+       first_tx = tx = xennet_make_one_txreq(queue, skb,
+                                             page, offset, len);
+       page++;
+       offset = 0;
+       len -= tx->size;
 
-       tx->flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                /* local packet? */
                tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
@@ -644,11 +579,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                /* remote but checksummed. */
                tx->flags |= XEN_NETTXF_data_validated;
 
+       /* Optional extra info after the first request. */
        if (skb_shinfo(skb)->gso_size) {
                struct xen_netif_extra_info *gso;
 
                gso = (struct xen_netif_extra_info *)
-                       RING_GET_REQUEST(&queue->tx, ++i);
+                       RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
                tx->flags |= XEN_NETTXF_extra_info;
 
@@ -663,19 +599,28 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                gso->flags = 0;
        }
 
-       queue->tx.req_prod_pvt = i + 1;
+       /* Requests for the rest of the linear area. */
+       tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+
+       /* Requests for all the frags. */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               tx = xennet_make_txreqs(queue, tx, skb,
+                                       skb_frag_page(frag), frag->page_offset,
+                                       skb_frag_size(frag));
+       }
 
-       xennet_make_frags(skb, queue, tx);
-       tx->size = skb->len;
+       /* First request has the packet length. */
+       first_tx->size = skb->len;
 
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
                notify_remote_via_irq(queue->tx_irq);
 
-       u64_stats_update_begin(&stats->syncp);
-       stats->tx_bytes += skb->len;
-       stats->tx_packets++;
-       u64_stats_update_end(&stats->syncp);
+       u64_stats_update_begin(&tx_stats->syncp);
+       tx_stats->bytes += skb->len;
+       tx_stats->packets++;
+       u64_stats_update_end(&tx_stats->syncp);
 
        /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
        xennet_tx_buf_gc(queue);
@@ -931,7 +876,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 static int handle_incoming_queue(struct netfront_queue *queue,
                                 struct sk_buff_head *rxq)
 {
-       struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+       struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
        int packets_dropped = 0;
        struct sk_buff *skb;
 
@@ -952,10 +897,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
                        continue;
                }
 
-               u64_stats_update_begin(&stats->syncp);
-               stats->rx_packets++;
-               stats->rx_bytes += skb->len;
-               u64_stats_update_end(&stats->syncp);
+               u64_stats_update_begin(&rx_stats->syncp);
+               rx_stats->packets++;
+               rx_stats->bytes += skb->len;
+               u64_stats_update_end(&rx_stats->syncp);
 
                /* Pass it up. */
                napi_gro_receive(&queue->napi, skb);
@@ -1079,18 +1024,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+               struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+               struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;
 
                do {
-                       start = u64_stats_fetch_begin_irq(&stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+                       tx_packets = tx_stats->packets;
+                       tx_bytes = tx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
 
-                       rx_packets = stats->rx_packets;
-                       tx_packets = stats->tx_packets;
-                       rx_bytes = stats->rx_bytes;
-                       tx_bytes = stats->tx_bytes;
-               } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+               do {
+                       start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+                       rx_packets = rx_stats->packets;
+                       rx_bytes = rx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
 
                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
@@ -1275,6 +1224,15 @@ static const struct net_device_ops xennet_netdev_ops = {
 #endif
 };
 
+static void xennet_free_netdev(struct net_device *netdev)
+{
+       struct netfront_info *np = netdev_priv(netdev);
+
+       free_percpu(np->rx_stats);
+       free_percpu(np->tx_stats);
+       free_netdev(netdev);
+}
+
 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 {
        int err;
@@ -1295,8 +1253,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        np->queues = NULL;
 
        err = -ENOMEM;
-       np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
-       if (np->stats == NULL)
+       np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+       if (np->rx_stats == NULL)
+               goto exit;
+       np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+       if (np->tx_stats == NULL)
                goto exit;
 
        netdev->netdev_ops      = &xennet_netdev_ops;
@@ -1327,7 +1288,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        return netdev;
 
  exit:
-       free_netdev(netdev);
+       xennet_free_netdev(netdev);
        return ERR_PTR(err);
 }
 
@@ -1352,24 +1313,19 @@ static int netfront_probe(struct xenbus_device *dev,
 
        info = netdev_priv(netdev);
        dev_set_drvdata(&dev->dev, info);
-
+#ifdef CONFIG_SYSFS
+       info->netdev->sysfs_groups[0] = &xennet_dev_group;
+#endif
        err = register_netdev(info->netdev);
        if (err) {
                pr_warn("%s: register_netdev err=%d\n", __func__, err);
                goto fail;
        }
 
-       err = xennet_sysfs_addif(info->netdev);
-       if (err) {
-               unregister_netdev(info->netdev);
-               pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
-               goto fail;
-       }
-
        return 0;
 
  fail:
-       free_netdev(netdev);
+       xennet_free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);
        return err;
 }
@@ -2129,39 +2085,20 @@ static ssize_t store_rxbuf(struct device *dev,
        return len;
 }
 
-static struct device_attribute xennet_attrs[] = {
-       __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
-       __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
-       __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
-};
+static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
+static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
+static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
 
-static int xennet_sysfs_addif(struct net_device *netdev)
-{
-       int i;
-       int err;
-
-       for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
-               err = device_create_file(&netdev->dev,
-                                          &xennet_attrs[i]);
-               if (err)
-                       goto fail;
-       }
-       return 0;
-
- fail:
-       while (--i >= 0)
-               device_remove_file(&netdev->dev, &xennet_attrs[i]);
-       return err;
-}
-
-static void xennet_sysfs_delif(struct net_device *netdev)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
-               device_remove_file(&netdev->dev, &xennet_attrs[i]);
-}
+static struct attribute *xennet_dev_attrs[] = {
+       &dev_attr_rxbuf_min.attr,
+       &dev_attr_rxbuf_max.attr,
+       &dev_attr_rxbuf_cur.attr,
+       NULL
+};
 
+static const struct attribute_group xennet_dev_group = {
+       .attrs = xennet_dev_attrs
+};
 #endif /* CONFIG_SYSFS */
 
 static int xennet_remove(struct xenbus_device *dev)
@@ -2175,8 +2112,6 @@ static int xennet_remove(struct xenbus_device *dev)
 
        xennet_disconnect_backend(info);
 
-       xennet_sysfs_delif(info->netdev);
-
        unregister_netdev(info->netdev);
 
        for (i = 0; i < num_queues; ++i) {
@@ -2189,9 +2124,7 @@ static int xennet_remove(struct xenbus_device *dev)
                info->queues = NULL;
        }
 
-       free_percpu(info->stats);
-
-       free_netdev(info->netdev);
+       xennet_free_netdev(info->netdev);
 
        return 0;
 }
index 963a4a5dc88e5aa2e13d814b5731fb1f67f74102..f454dc68cc034ca487a014d2043f1abc93b8559d 100644 (file)
@@ -557,10 +557,11 @@ exit:
                pr_err("Failed to handle discovered target err=%d\n", r);
 }
 
-static int microread_event_received(struct nfc_hci_dev *hdev, u8 gate,
+static int microread_event_received(struct nfc_hci_dev *hdev, u8 pipe,
                                     u8 event, struct sk_buff *skb)
 {
        int r;
+       u8 gate = hdev->pipes[pipe].gate;
        u8 mode;
 
        pr_info("Microread received event 0x%x to gate 0x%x\n", event, gate);
index fc02e8d6a1936d89dffae06d78441cd0ba62ab5a..cdde745b96bd142e07a90b441447ed57bd94b087 100644 (file)
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
 #include <linux/of_irq.h>
+#include <linux/acpi.h>
 #include <linux/miscdevice.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/nfc.h>
 #include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
 #include <linux/platform_data/pn544.h>
 #include <asm/unaligned.h>
 
 #define PN544_I2C_FRAME_HEADROOM 1
 #define PN544_I2C_FRAME_TAILROOM 2
 
+/* GPIO names */
+#define PN544_GPIO_NAME_IRQ "pn544_irq"
+#define PN544_GPIO_NAME_FW  "pn544_fw"
+#define PN544_GPIO_NAME_EN  "pn544_en"
+
 /* framing in HCI mode */
 #define PN544_HCI_I2C_LLC_LEN          1
 #define PN544_HCI_I2C_LLC_CRC          2
@@ -58,6 +65,13 @@ static struct i2c_device_id pn544_hci_i2c_id_table[] = {
 
 MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
 
+static const struct acpi_device_id pn544_hci_i2c_acpi_match[] = {
+       {"NXP5440", 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(acpi, pn544_hci_i2c_acpi_match);
+
 #define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
 
 /*
@@ -195,18 +209,19 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
        nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
 
        /* Disable fw download */
-       gpio_set_value(phy->gpio_fw, 0);
+       gpio_set_value_cansleep(phy->gpio_fw, 0);
 
        for (polarity = 0; polarity < 2; polarity++) {
                phy->en_polarity = polarity;
                retry = 3;
                while (retry--) {
                        /* power off */
-                       gpio_set_value(phy->gpio_en, !phy->en_polarity);
+                       gpio_set_value_cansleep(phy->gpio_en,
+                                               !phy->en_polarity);
                        usleep_range(10000, 15000);
 
                        /* power on */
-                       gpio_set_value(phy->gpio_en, phy->en_polarity);
+                       gpio_set_value_cansleep(phy->gpio_en, phy->en_polarity);
                        usleep_range(10000, 15000);
 
                        /* send reset */
@@ -225,13 +240,14 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
                "Could not detect nfc_en polarity, fallback to active high\n");
 
 out:
-       gpio_set_value(phy->gpio_en, !phy->en_polarity);
+       gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity);
 }
 
 static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
 {
-       gpio_set_value(phy->gpio_fw, run_mode == PN544_FW_MODE ? 1 : 0);
-       gpio_set_value(phy->gpio_en, phy->en_polarity);
+       gpio_set_value_cansleep(phy->gpio_fw,
+                               run_mode == PN544_FW_MODE ? 1 : 0);
+       gpio_set_value_cansleep(phy->gpio_en, phy->en_polarity);
        usleep_range(10000, 15000);
 
        phy->run_mode = run_mode;
@@ -254,14 +270,14 @@ static void pn544_hci_i2c_disable(void *phy_id)
 {
        struct pn544_i2c_phy *phy = phy_id;
 
-       gpio_set_value(phy->gpio_fw, 0);
-       gpio_set_value(phy->gpio_en, !phy->en_polarity);
+       gpio_set_value_cansleep(phy->gpio_fw, 0);
+       gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity);
        usleep_range(10000, 15000);
 
-       gpio_set_value(phy->gpio_en, phy->en_polarity);
+       gpio_set_value_cansleep(phy->gpio_en, phy->en_polarity);
        usleep_range(10000, 15000);
 
-       gpio_set_value(phy->gpio_en, !phy->en_polarity);
+       gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity);
        usleep_range(10000, 15000);
 
        phy->powered = 0;
@@ -859,6 +875,90 @@ exit_state_wait_secure_write_answer:
        }
 }
 
+static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client)
+{
+       struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
+       const struct acpi_device_id *id;
+       struct gpio_desc *gpiod_en, *gpiod_irq, *gpiod_fw;
+       struct device *dev;
+       int ret;
+
+       if (!client)
+               return -EINVAL;
+
+       dev = &client->dev;
+
+       /* Match the struct device against a given list of ACPI IDs */
+       id = acpi_match_device(dev->driver->acpi_match_table, dev);
+
+       if (!id)
+               return -ENODEV;
+
+       /* Get EN GPIO from ACPI */
+       gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1);
+       if (IS_ERR(gpiod_en)) {
+               nfc_err(dev,
+                       "Unable to get EN GPIO\n");
+               return -ENODEV;
+       }
+
+       phy->gpio_en  = desc_to_gpio(gpiod_en);
+
+       /* Configuration EN GPIO */
+       ret = gpiod_direction_output(gpiod_en, 0);
+       if (ret) {
+               nfc_err(dev, "Fail EN pin direction\n");
+               return ret;
+       }
+
+       /* Get FW GPIO from ACPI */
+       gpiod_fw = devm_gpiod_get_index(dev, PN544_GPIO_NAME_FW, 2);
+       if (IS_ERR(gpiod_fw)) {
+               nfc_err(dev,
+                       "Unable to get FW GPIO\n");
+               return -ENODEV;
+       }
+
+       phy->gpio_fw  = desc_to_gpio(gpiod_fw);
+
+       /* Configuration FW GPIO */
+       ret = gpiod_direction_output(gpiod_fw, 0);
+       if (ret) {
+               nfc_err(dev, "Fail FW pin direction\n");
+               return ret;
+       }
+
+       /* Get IRQ GPIO */
+       gpiod_irq = devm_gpiod_get_index(dev, PN544_GPIO_NAME_IRQ, 0);
+       if (IS_ERR(gpiod_irq)) {
+               nfc_err(dev,
+                       "Unable to get IRQ GPIO\n");
+               return -ENODEV;
+       }
+
+       phy->gpio_irq = desc_to_gpio(gpiod_irq);
+
+       /* Configure IRQ GPIO */
+       ret = gpiod_direction_input(gpiod_irq);
+       if (ret) {
+               nfc_err(dev, "Fail IRQ pin direction\n");
+               return ret;
+       }
+
+       /* Map the pin to an IRQ */
+       ret = gpiod_to_irq(gpiod_irq);
+       if (ret < 0) {
+               nfc_err(dev, "Fail pin IRQ mapping\n");
+               return ret;
+       }
+
+       nfc_info(dev, "GPIO resource, no:%d irq:%d\n",
+                       desc_to_gpio(gpiod_irq), ret);
+       client->irq = ret;
+
+       return 0;
+}
+
 #ifdef CONFIG_OF
 
 static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
@@ -884,7 +984,7 @@ static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
        phy->gpio_en = ret;
 
        /* Configuration of EN GPIO */
-       ret = gpio_request(phy->gpio_en, "pn544_en");
+       ret = gpio_request(phy->gpio_en, PN544_GPIO_NAME_EN);
        if (ret) {
                nfc_err(&client->dev, "Fail EN pin\n");
                goto err_dt;
@@ -906,7 +1006,7 @@ static int pn544_hci_i2c_of_request_resources(struct i2c_client *client)
        phy->gpio_fw = ret;
 
        /* Configuration of FW GPIO */
-       ret = gpio_request(phy->gpio_fw, "pn544_fw");
+       ret = gpio_request(phy->gpio_fw, PN544_GPIO_NAME_FW);
        if (ret) {
                nfc_err(&client->dev, "Fail FW pin\n");
                goto err_gpio_en;
@@ -1001,6 +1101,14 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
                phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
                phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
                phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+       /* Using ACPI */
+       } else if (ACPI_HANDLE(&client->dev)) {
+               r = pn544_hci_i2c_acpi_request_resources(client);
+               if (r) {
+                       nfc_err(&client->dev,
+                               "Cannot get ACPI data\n");
+                       return r;
+               }
        } else {
                nfc_err(&client->dev, "No platform data\n");
                return -EINVAL;
@@ -1080,6 +1188,7 @@ static struct i2c_driver pn544_hci_i2c_driver = {
                   .name = PN544_HCI_I2C_DRIVER_NAME,
                   .owner  = THIS_MODULE,
                   .of_match_table = of_match_ptr(of_pn544_i2c_match),
+                  .acpi_match_table = ACPI_PTR(pn544_hci_i2c_acpi_match),
                  },
        .probe = pn544_hci_i2c_probe,
        .id_table = pn544_hci_i2c_id_table,
index 9c8051d20cea98fa4377edb79167ddef2b14cf24..12e819ddf17a708c94ab0c122ac852f0199c85aa 100644 (file)
@@ -724,10 +724,11 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
  * <= 0: driver handled the event, skb consumed
  *    1: driver does not handle the event, please do standard processing
  */
-static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
                                    struct sk_buff *skb)
 {
        struct sk_buff *rgb_skb = NULL;
+       u8 gate = hdev->pipes[pipe].gate;
        int r;
 
        pr_debug("hci event %d\n", event);
index 7d688f97aa278311731f7e5d8a96af9b974cb253..97edab4bbdf8bb7c0a3ac1a21fdf45e4e59a4270 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for ST21NFCA HCI based NFC driver
 #
 
-st21nfca_hci-objs = st21nfca.o st21nfca_dep.o
+st21nfca_hci-objs = st21nfca.o st21nfca_dep.o st21nfca_se.o
 obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca_hci.o
 
 st21nfca_i2c-objs  = i2c.o
index 05722085a59fafb4c8a66db1095a88bcb25d7970..a32143951616fde13b5828938f3408b5209130cd 100644 (file)
@@ -74,6 +74,8 @@ struct st21nfca_i2c_phy {
        unsigned int gpio_ena;
        unsigned int irq_polarity;
 
+       struct st21nfca_se_status se_status;
+
        struct sk_buff *pending_skb;
        int current_read_len;
        /*
@@ -537,6 +539,11 @@ static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client)
 
        phy->irq_polarity = irq_get_trigger_type(client->irq);
 
+       phy->se_status.is_ese_present =
+                               of_property_read_bool(pp, "ese-present");
+       phy->se_status.is_uicc_present =
+                               of_property_read_bool(pp, "uicc-present");
+
        return 0;
 }
 #else
@@ -571,6 +578,9 @@ static int st21nfca_hci_i2c_request_resources(struct i2c_client *client)
                }
        }
 
+       phy->se_status.is_ese_present = pdata->is_ese_present;
+       phy->se_status.is_uicc_present = pdata->is_uicc_present;
+
        return 0;
 }
 
@@ -591,11 +601,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
 
        phy = devm_kzalloc(&client->dev, sizeof(struct st21nfca_i2c_phy),
                           GFP_KERNEL);
-       if (!phy) {
-               nfc_err(&client->dev,
-                       "Cannot allocate memory for st21nfca i2c phy.\n");
+       if (!phy)
                return -ENOMEM;
-       }
 
        phy->i2c_dev = client;
        phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL);
@@ -641,8 +648,11 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
        }
 
        return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
-                              ST21NFCA_FRAME_HEADROOM, ST21NFCA_FRAME_TAILROOM,
-                              ST21NFCA_HCI_LLC_MAX_PAYLOAD, &phy->hdev);
+                                       ST21NFCA_FRAME_HEADROOM,
+                                       ST21NFCA_FRAME_TAILROOM,
+                                       ST21NFCA_HCI_LLC_MAX_PAYLOAD,
+                                       &phy->hdev,
+                                       &phy->se_status);
 }
 
 static int st21nfca_hci_i2c_remove(struct i2c_client *client)
@@ -661,6 +671,7 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
 
 #ifdef CONFIG_OF
 static const struct of_device_id of_st21nfca_i2c_match[] = {
+       { .compatible = "st,st21nfca-i2c", },
        { .compatible = "st,st21nfca_i2c", },
        {}
 };
index f2596c8d68b0b36b12501914e7a1ea49ce16c058..24d3d240d5f42c83484925ed110f3a9dbf74ffcf 100644 (file)
@@ -23,6 +23,7 @@
 
 #include "st21nfca.h"
 #include "st21nfca_dep.h"
+#include "st21nfca_se.h"
 
 #define DRIVER_DESC "HCI NFC driver for ST21NFCA"
 
@@ -62,7 +63,6 @@
 #define ST21NFCA_RF_CARD_F_DATARATE            0x08
 #define ST21NFCA_RF_CARD_F_DATARATE_212_424    0x01
 
-#define ST21NFCA_DEVICE_MGNT_GATE              0x01
 #define ST21NFCA_DEVICE_MGNT_PIPE              0x02
 
 #define ST21NFCA_DM_GETINFO                    0x13
 
 #define ST21NFCA_NFC_MODE                      0x03    /* NFC_MODE parameter*/
 
+#define ST21NFCA_EVT_HOT_PLUG                  0x03
+#define ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(x) (x->data[0] & 0x80)
+
+#define ST21NFCA_SE_TO_PIPES                   2000
+
 static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES);
 
 static struct nfc_hci_gate st21nfca_gates[] = {
@@ -92,6 +97,10 @@ static struct nfc_hci_gate st21nfca_gates[] = {
        {ST21NFCA_RF_READER_14443_3_A_GATE, NFC_HCI_INVALID_PIPE},
        {ST21NFCA_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
        {ST21NFCA_RF_CARD_F_GATE, NFC_HCI_INVALID_PIPE},
+
+       /* Secure element pipes are created by secure element host */
+       {ST21NFCA_CONNECTIVITY_GATE, NFC_HCI_DO_NOT_CREATE_PIPE},
+       {ST21NFCA_APDU_READER_GATE, NFC_HCI_DO_NOT_CREATE_PIPE},
 };
 
 struct st21nfca_pipe_info {
@@ -118,18 +127,6 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
                NFC_HCI_TERMINAL_HOST_ID, 0
        };
 
-       skb_pipe_list = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
-       if (!skb_pipe_list) {
-               r = -ENOMEM;
-               goto free_list;
-       }
-
-       skb_pipe_info = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE, GFP_KERNEL);
-       if (!skb_pipe_info) {
-               r = -ENOMEM;
-               goto free_info;
-       }
-
        /* On ST21NFCA device pipes number are dynamics
         * A maximum of 16 pipes can be created at the same time
         * If pipes are already created, hci_dev_up will fail.
@@ -148,7 +145,8 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
         * Pipe can be closed and need to be open.
         */
        r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
-               ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE);
+                               ST21NFCA_DEVICE_MGNT_GATE,
+                               ST21NFCA_DEVICE_MGNT_PIPE);
        if (r < 0)
                goto free_info;
 
@@ -179,17 +177,28 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
                 * - destination gid (1byte)
                 */
                info = (struct st21nfca_pipe_info *) skb_pipe_info->data;
+               if (info->dst_gate_id == ST21NFCA_APDU_READER_GATE &&
+                       info->src_host_id != ST21NFCA_ESE_HOST_ID) {
+                       pr_err("Unexpected apdu_reader pipe on host %x\n",
+                               info->src_host_id);
+                       continue;
+               }
+
                for (j = 0; (j < ARRAY_SIZE(st21nfca_gates)) &&
-                       (st21nfca_gates[j].gate != info->dst_gate_id);
-                       j++)
+                       (st21nfca_gates[j].gate != info->dst_gate_id) ; j++)
                        ;
 
                if (j < ARRAY_SIZE(st21nfca_gates) &&
                        st21nfca_gates[j].gate == info->dst_gate_id &&
                        ST21NFCA_DM_IS_PIPE_OPEN(info->pipe_state)) {
                        st21nfca_gates[j].pipe = pipe_info[2];
+
                        hdev->gate2pipe[st21nfca_gates[j].gate] =
-                               st21nfca_gates[j].pipe;
+                                                       st21nfca_gates[j].pipe;
+                       hdev->pipes[st21nfca_gates[j].pipe].gate =
+                                                       st21nfca_gates[j].gate;
+                       hdev->pipes[st21nfca_gates[j].pipe].dest_host =
+                                                       info->src_host_id;
                }
        }
 
@@ -199,7 +208,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
         */
        if (skb_pipe_list->len + 3 < ARRAY_SIZE(st21nfca_gates)) {
                for (i = skb_pipe_list->len + 3;
-                               i < ARRAY_SIZE(st21nfca_gates); i++) {
+                               i < ARRAY_SIZE(st21nfca_gates) - 2; i++) {
                        r = nfc_hci_connect_gate(hdev,
                                        NFC_HCI_HOST_CONTROLLER_ID,
                                        st21nfca_gates[i].gate,
@@ -212,7 +221,6 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
        memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
 free_info:
        kfree_skb(skb_pipe_info);
-free_list:
        kfree_skb(skb_pipe_list);
        return r;
 }
@@ -257,16 +265,33 @@ out:
 
 static int st21nfca_hci_ready(struct nfc_hci_dev *hdev)
 {
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
        struct sk_buff *skb;
 
        u8 param;
+       u8 white_list[2];
+       int wl_size = 0;
        int r;
 
-       param = NFC_HCI_UICC_HOST_ID;
-       r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
-                             NFC_HCI_ADMIN_WHITELIST, &param, 1);
-       if (r < 0)
-               return r;
+       if (info->se_status->is_ese_present &&
+               info->se_status->is_uicc_present) {
+               white_list[wl_size++] = NFC_HCI_UICC_HOST_ID;
+               white_list[wl_size++] = ST21NFCA_ESE_HOST_ID;
+       } else if (!info->se_status->is_ese_present &&
+                        info->se_status->is_uicc_present) {
+               white_list[wl_size++] = NFC_HCI_UICC_HOST_ID;
+       } else if (info->se_status->is_ese_present &&
+                       !info->se_status->is_uicc_present) {
+               white_list[wl_size++] = ST21NFCA_ESE_HOST_ID;
+       }
+
+       if (wl_size) {
+               r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+                                       NFC_HCI_ADMIN_WHITELIST,
+                                       (u8 *) &white_list, wl_size);
+               if (r < 0)
+                       return r;
+       }
 
        /* Set NFC_MODE in device management gate to enable */
        r = nfc_hci_get_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
@@ -274,8 +299,9 @@ static int st21nfca_hci_ready(struct nfc_hci_dev *hdev)
        if (r < 0)
                return r;
 
-       if (skb->data[0] == 0) {
-               kfree_skb(skb);
+       param = skb->data[0];
+       kfree_skb(skb);
+       if (param == 0) {
                param = 1;
 
                r = nfc_hci_set_param(hdev, ST21NFCA_DEVICE_MGNT_GATE,
@@ -417,9 +443,12 @@ static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev,
                        r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE,
                                              ST21NFCA_RF_CARD_F_DATARATE,
                                              param, 1);
-                       if (r < 0)
+                       if (r < 0) {
+                               kfree_skb(datarate_skb);
                                return r;
+                       }
                }
+               kfree_skb(datarate_skb);
 
                /*
                 * Configure sens_res
@@ -673,15 +702,15 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
                                                struct nfc_target *target)
 {
        int r;
-       struct sk_buff *nfcid2_skb = NULL, *nfcid1_skb;
+       struct sk_buff *nfcid_skb = NULL;
 
        if (gate == ST21NFCA_RF_READER_F_GATE) {
                r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
-                               ST21NFCA_RF_READER_F_NFCID2, &nfcid2_skb);
+                               ST21NFCA_RF_READER_F_NFCID2, &nfcid_skb);
                if (r < 0)
                        goto exit;
 
-               if (nfcid2_skb->len > NFC_SENSF_RES_MAXSIZE) {
+               if (nfcid_skb->len > NFC_SENSF_RES_MAXSIZE) {
                        r = -EPROTO;
                        goto exit;
                }
@@ -693,11 +722,11 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
                 * - After the reception of SEL_RES with NFCIP-1 compliant bit
                 * set for type A frame NFCID1 will be updated
                 */
-               if (nfcid2_skb->len > 0) {
+               if (nfcid_skb->len > 0) {
                        /* P2P in type F */
-                       memcpy(target->sensf_res, nfcid2_skb->data,
-                               nfcid2_skb->len);
-                       target->sensf_res_len = nfcid2_skb->len;
+                       memcpy(target->sensf_res, nfcid_skb->data,
+                               nfcid_skb->len);
+                       target->sensf_res_len = nfcid_skb->len;
                        /* NFC Forum Digital Protocol Table 44 */
                        if (target->sensf_res[0] == 0x01 &&
                            target->sensf_res[1] == 0xfe)
@@ -707,27 +736,28 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
                                target->supported_protocols =
                                                        NFC_PROTO_FELICA_MASK;
                } else {
+                       kfree_skb(nfcid_skb);
                        /* P2P in type A */
                        r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
                                        ST21NFCA_RF_READER_F_NFCID1,
-                                       &nfcid1_skb);
+                                       &nfcid_skb);
                        if (r < 0)
                                goto exit;
 
-                       if (nfcid1_skb->len > NFC_NFCID1_MAXSIZE) {
+                       if (nfcid_skb->len > NFC_NFCID1_MAXSIZE) {
                                r = -EPROTO;
                                goto exit;
                        }
-                       memcpy(target->sensf_res, nfcid1_skb->data,
-                               nfcid1_skb->len);
-                       target->sensf_res_len = nfcid1_skb->len;
+                       memcpy(target->sensf_res, nfcid_skb->data,
+                               nfcid_skb->len);
+                       target->sensf_res_len = nfcid_skb->len;
                        target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
                }
                target->hci_reader_gate = ST21NFCA_RF_READER_F_GATE;
        }
        r = 1;
 exit:
-       kfree_skb(nfcid2_skb);
+       kfree_skb(nfcid_skb);
        return r;
 }
 
@@ -829,24 +859,82 @@ static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev,
        }
 }
 
+static void st21nfca_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+                               struct sk_buff *skb)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+       u8 gate = hdev->pipes[pipe].gate;
+
+       pr_debug("cmd: %x\n", cmd);
+
+       switch (cmd) {
+       case NFC_HCI_ANY_OPEN_PIPE:
+               if (gate != ST21NFCA_APDU_READER_GATE &&
+                       hdev->pipes[pipe].dest_host != NFC_HCI_UICC_HOST_ID)
+                       info->se_info.count_pipes++;
+
+               if (info->se_info.count_pipes == info->se_info.expected_pipes) {
+                       del_timer_sync(&info->se_info.se_active_timer);
+                       info->se_info.se_active = false;
+                       info->se_info.count_pipes = 0;
+                       complete(&info->se_info.req_completion);
+               }
+       break;
+       }
+}
+
+static int st21nfca_admin_event_received(struct nfc_hci_dev *hdev, u8 event,
+                                       struct sk_buff *skb)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       pr_debug("admin event: %x\n", event);
+
+       switch (event) {
+       case ST21NFCA_EVT_HOT_PLUG:
+               if (info->se_info.se_active) {
+                       if (!ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
+                               del_timer_sync(&info->se_info.se_active_timer);
+                               info->se_info.se_active = false;
+                               complete(&info->se_info.req_completion);
+                       } else {
+                               mod_timer(&info->se_info.se_active_timer,
+                                       jiffies +
+                                       msecs_to_jiffies(ST21NFCA_SE_TO_PIPES));
+                       }
+               }
+       break;
+       }
+       kfree_skb(skb);
+       return 0;
+}
+
 /*
  * Returns:
  * <= 0: driver handled the event, skb consumed
  *    1: driver does not handle the event, please do standard processing
  */
-static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 gate,
+static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe,
                                       u8 event, struct sk_buff *skb)
 {
+       u8 gate = hdev->pipes[pipe].gate;
+       u8 host = hdev->pipes[pipe].dest_host;
+
        pr_debug("hci event: %d gate: %x\n", event, gate);
 
        switch (gate) {
+       case NFC_HCI_ADMIN_GATE:
+               return st21nfca_admin_event_received(hdev, event, skb);
        case ST21NFCA_RF_CARD_F_GATE:
                return st21nfca_dep_event_received(hdev, event, skb);
+       case ST21NFCA_CONNECTIVITY_GATE:
+               return st21nfca_connectivity_event_received(hdev, host,
+                                                       event, skb);
+       case ST21NFCA_APDU_READER_GATE:
+               return st21nfca_apdu_reader_event_received(hdev, event, skb);
        default:
                return 1;
        }
-       kfree_skb(skb);
-       return 0;
 }
 
 static struct nfc_hci_ops st21nfca_hci_ops = {
@@ -865,11 +953,17 @@ static struct nfc_hci_ops st21nfca_hci_ops = {
        .tm_send = st21nfca_hci_tm_send,
        .check_presence = st21nfca_hci_check_presence,
        .event_received = st21nfca_hci_event_received,
+       .cmd_received = st21nfca_hci_cmd_received,
+       .discover_se = st21nfca_hci_discover_se,
+       .enable_se = st21nfca_hci_enable_se,
+       .disable_se = st21nfca_hci_disable_se,
+       .se_io = st21nfca_hci_se_io,
 };
 
 int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
                       char *llc_name, int phy_headroom, int phy_tailroom,
-                      int phy_payload, struct nfc_hci_dev **hdev)
+                      int phy_payload, struct nfc_hci_dev **hdev,
+                          struct st21nfca_se_status *se_status)
 {
        struct st21nfca_hci_info *info;
        int r = 0;
@@ -929,6 +1023,8 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
                goto err_alloc_hdev;
        }
 
+       info->se_status = se_status;
+
        nfc_hci_set_clientdata(info->hdev, info);
 
        r = nfc_hci_register_device(info->hdev);
@@ -937,6 +1033,7 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
 
        *hdev = info->hdev;
        st21nfca_dep_init(info->hdev);
+       st21nfca_se_init(info->hdev);
 
        return 0;
 
@@ -955,6 +1052,7 @@ void st21nfca_hci_remove(struct nfc_hci_dev *hdev)
        struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
 
        st21nfca_dep_deinit(hdev);
+       st21nfca_se_deinit(hdev);
        nfc_hci_unregister_device(hdev);
        nfc_hci_free_device(hdev);
        kfree(info);
index 7c2a852922304513c4bd52794e0b03eb7de11d23..15a78d330a9f32a21d3a58aaeff1a095ad84a0cd 100644 (file)
@@ -20,6 +20,7 @@
 #include <net/nfc/hci.h>
 
 #include "st21nfca_dep.h"
+#include "st21nfca_se.h"
 
 #define HCI_MODE 0
 
 
 #define ST21NFCA_NUM_DEVICES 256
 
+struct st21nfca_se_status {
+       bool is_ese_present;
+       bool is_uicc_present;
+};
+
 int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
                       char *llc_name, int phy_headroom, int phy_tailroom,
-                      int phy_payload, struct nfc_hci_dev **hdev);
+                      int phy_payload, struct nfc_hci_dev **hdev,
+                          struct st21nfca_se_status *se_status);
 void st21nfca_hci_remove(struct nfc_hci_dev *hdev);
 
 enum st21nfca_state {
@@ -66,6 +73,7 @@ struct st21nfca_hci_info {
        void *phy_id;
 
        struct nfc_hci_dev *hdev;
+       struct st21nfca_se_status *se_status;
 
        enum st21nfca_state state;
 
@@ -76,13 +84,16 @@ struct st21nfca_hci_info {
        void *async_cb_context;
 
        struct st21nfca_dep_info dep_info;
+       struct st21nfca_se_info se_info;
 };
 
 /* Reader RF commands */
-#define ST21NFCA_WR_XCHG_DATA            0x10
-
-#define ST21NFCA_RF_READER_F_GATE               0x14
+#define ST21NFCA_WR_XCHG_DATA           0x10
 
-#define ST21NFCA_RF_CARD_F_GATE 0x24
+#define ST21NFCA_DEVICE_MGNT_GATE       0x01
+#define ST21NFCA_RF_READER_F_GATE       0x14
+#define ST21NFCA_RF_CARD_F_GATE                        0x24
+#define ST21NFCA_APDU_READER_GATE              0xf0
+#define ST21NFCA_CONNECTIVITY_GATE             0x41
 
 #endif /* __LOCAL_ST21NFCA_H_ */
diff --git a/drivers/nfc/st21nfca/st21nfca_se.c b/drivers/nfc/st21nfca/st21nfca_se.c
new file mode 100644 (file)
index 0000000..bd13cac
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <net/nfc/hci.h>
+
+#include "st21nfca.h"
+#include "st21nfca_se.h"
+
+#define ST21NFCA_EVT_UICC_ACTIVATE             0x10
+#define ST21NFCA_EVT_UICC_DEACTIVATE   0x13
+#define ST21NFCA_EVT_SE_HARD_RESET             0x20
+#define ST21NFCA_EVT_SE_SOFT_RESET             0x11
+#define ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER   0x21
+#define ST21NFCA_EVT_SE_ACTIVATE               0x22
+#define ST21NFCA_EVT_SE_DEACTIVATE             0x23
+
+#define ST21NFCA_EVT_TRANSMIT_DATA             0x10
+#define ST21NFCA_EVT_WTX_REQUEST               0x11
+
+#define ST21NFCA_EVT_CONNECTIVITY              0x10
+#define ST21NFCA_EVT_TRANSACTION               0x12
+
+#define ST21NFCA_ESE_HOST_ID                   0xc0
+
+#define ST21NFCA_SE_TO_HOT_PLUG                        1000
+/* Connectivity pipe only */
+#define ST21NFCA_SE_COUNT_PIPE_UICC            0x01
+/* Connectivity + APDU Reader pipe */
+#define ST21NFCA_SE_COUNT_PIPE_EMBEDDED        0x02
+
+#define ST21NFCA_SE_MODE_OFF                   0x00
+#define ST21NFCA_SE_MODE_ON                            0x01
+
+#define ST21NFCA_PARAM_ATR                             0x01
+#define ST21NFCA_ATR_DEFAULT_BWI               0x04
+
+/*
+ * WT = 2^BWI/10[s], convert into msecs and add a secure
+ * room by increasing by 2 this timeout
+ */
+#define ST21NFCA_BWI_TO_TIMEOUT(x)             ((1 << x) * 200)
+#define ST21NFCA_ATR_GET_Y_FROM_TD(x)  (x >> 4)
+
+/* If TA is present bit 0 is set */
+#define ST21NFCA_ATR_TA_PRESENT(x) (x & 0x01)
+/* If TB is present bit 1 is set */
+#define ST21NFCA_ATR_TB_PRESENT(x) (x & 0x02)
+
+static u8 st21nfca_se_get_bwi(struct nfc_hci_dev *hdev)
+{
+       int i;
+       u8 td;
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       /* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */
+       for (i = 1; i < ST21NFCA_ESE_MAX_LENGTH; i++) {
+               td = ST21NFCA_ATR_GET_Y_FROM_TD(info->se_info.atr[i]);
+               if (ST21NFCA_ATR_TA_PRESENT(td))
+                       i++;
+               if (ST21NFCA_ATR_TB_PRESENT(td)) {
+                       i++;
+                       return info->se_info.atr[i] >> 4;
+               }
+       }
+       return ST21NFCA_ATR_DEFAULT_BWI;
+}
+
+static void st21nfca_se_get_atr(struct nfc_hci_dev *hdev)
+{
+       int r;
+       struct sk_buff *skb;
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       r = nfc_hci_get_param(hdev, ST21NFCA_APDU_READER_GATE,
+                       ST21NFCA_PARAM_ATR, &skb);
+       if (r < 0)
+               return;
+
+       if (skb->len <= ST21NFCA_ESE_MAX_LENGTH) {
+               memcpy(info->se_info.atr, skb->data, skb->len);
+               info->se_info.wt_timeout =
+                       ST21NFCA_BWI_TO_TIMEOUT(st21nfca_se_get_bwi(hdev));
+       }
+       kfree_skb(skb);
+}
+
+static int st21nfca_hci_control_se(struct nfc_hci_dev *hdev, u32 se_idx,
+                               u8 state)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+       int r;
+       struct sk_buff *sk_host_list;
+       u8 se_event, host_id;
+
+       switch (se_idx) {
+       case NFC_HCI_UICC_HOST_ID:
+               se_event = (state == ST21NFCA_SE_MODE_ON ?
+                                       ST21NFCA_EVT_UICC_ACTIVATE :
+                                       ST21NFCA_EVT_UICC_DEACTIVATE);
+
+               info->se_info.count_pipes = 0;
+               info->se_info.expected_pipes = ST21NFCA_SE_COUNT_PIPE_UICC;
+               break;
+       case ST21NFCA_ESE_HOST_ID:
+               se_event = (state == ST21NFCA_SE_MODE_ON ?
+                                       ST21NFCA_EVT_SE_ACTIVATE :
+                                       ST21NFCA_EVT_SE_DEACTIVATE);
+
+               info->se_info.count_pipes = 0;
+               info->se_info.expected_pipes = ST21NFCA_SE_COUNT_PIPE_EMBEDDED;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /*
+        * Wait for an EVT_HOT_PLUG in order to
+        * retrieve a relevant host list.
+        */
+       reinit_completion(&info->se_info.req_completion);
+       r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, se_event,
+                              NULL, 0);
+       if (r < 0)
+               return r;
+
+       mod_timer(&info->se_info.se_active_timer, jiffies +
+               msecs_to_jiffies(ST21NFCA_SE_TO_HOT_PLUG));
+       info->se_info.se_active = true;
+
+       /* Ignore return value and check in any case the host_list */
+       wait_for_completion_interruptible(&info->se_info.req_completion);
+
+       r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE,
+                       NFC_HCI_ADMIN_HOST_LIST,
+                       &sk_host_list);
+       if (r < 0)
+               return r;
+
+       host_id = sk_host_list->data[sk_host_list->len - 1];
+       kfree_skb(sk_host_list);
+
+       if (state == ST21NFCA_SE_MODE_ON && host_id == se_idx)
+               return se_idx;
+       else if (state == ST21NFCA_SE_MODE_OFF && host_id != se_idx)
+               return se_idx;
+
+       return -1;
+}
+
+int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+       int se_count = 0;
+
+       if (info->se_status->is_uicc_present) {
+               nfc_add_se(hdev->ndev, NFC_HCI_UICC_HOST_ID, NFC_SE_UICC);
+               se_count++;
+       }
+
+       if (info->se_status->is_ese_present) {
+               nfc_add_se(hdev->ndev, ST21NFCA_ESE_HOST_ID, NFC_SE_EMBEDDED);
+               se_count++;
+       }
+
+       return !se_count;
+}
+EXPORT_SYMBOL(st21nfca_hci_discover_se);
+
+int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+       int r;
+
+       /*
+        * According to upper layer, se_idx == NFC_SE_UICC when
+        * info->se_status->is_uicc_enable is true should never happen.
+        * Same for eSE.
+        */
+       r = st21nfca_hci_control_se(hdev, se_idx, ST21NFCA_SE_MODE_ON);
+
+       if (r == ST21NFCA_ESE_HOST_ID) {
+               st21nfca_se_get_atr(hdev);
+               r = nfc_hci_send_event(hdev, ST21NFCA_APDU_READER_GATE,
+                               ST21NFCA_EVT_SE_SOFT_RESET, NULL, 0);
+               if (r < 0)
+                       return r;
+       } else if (r < 0) {
+               /*
+                * The activation tentative failed, the secure element
+                * is not connected. Remove from the list.
+                */
+               nfc_remove_se(hdev->ndev, se_idx);
+               return r;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(st21nfca_hci_enable_se);
+
+int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+       int r;
+
+       /*
+        * According to upper layer, se_idx == NFC_SE_UICC when
+        * info->se_status->is_uicc_enable is true should never happen
+        * Same for eSE.
+        */
+       r = st21nfca_hci_control_se(hdev, se_idx, ST21NFCA_SE_MODE_OFF);
+       if (r < 0)
+               return r;
+
+       return 0;
+}
+EXPORT_SYMBOL(st21nfca_hci_disable_se);
+
+int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
+                       u8 *apdu, size_t apdu_length,
+                       se_io_cb_t cb, void *cb_context)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       pr_debug("se_io %x\n", se_idx);
+
+       switch (se_idx) {
+       case ST21NFCA_ESE_HOST_ID:
+               info->se_info.cb = cb;
+               info->se_info.cb_context = cb_context;
+               mod_timer(&info->se_info.bwi_timer, jiffies +
+                         msecs_to_jiffies(info->se_info.wt_timeout));
+               info->se_info.bwi_active = true;
+               return nfc_hci_send_event(hdev, ST21NFCA_APDU_READER_GATE,
+                                       ST21NFCA_EVT_TRANSMIT_DATA,
+                                       apdu, apdu_length);
+       default:
+               return -ENODEV;
+       }
+}
+EXPORT_SYMBOL(st21nfca_hci_se_io);
+
+static void st21nfca_se_wt_timeout(unsigned long data)
+{
+       /*
+        * No answer from the secure element
+        * within the defined timeout.
+        * Let's send a reset request as recovery procedure.
+        * According to the situation, we first try to send a software reset
+        * to the secure element. If the next command is still not
+        * answering in time, we send to the CLF a secure element hardware
+        * reset request.
+        */
+       /* hardware reset managed through VCC_UICC_OUT power supply */
+       u8 param = 0x01;
+       struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+
+       pr_debug("\n");
+
+       info->se_info.bwi_active = false;
+
+       if (!info->se_info.xch_error) {
+               info->se_info.xch_error = true;
+               nfc_hci_send_event(info->hdev, ST21NFCA_APDU_READER_GATE,
+                               ST21NFCA_EVT_SE_SOFT_RESET, NULL, 0);
+       } else {
+               info->se_info.xch_error = false;
+               nfc_hci_send_event(info->hdev, ST21NFCA_DEVICE_MGNT_GATE,
+                               ST21NFCA_EVT_SE_HARD_RESET, &param, 1);
+       }
+       info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
+}
+
+static void st21nfca_se_activation_timeout(unsigned long data)
+{
+       struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+
+       pr_debug("\n");
+
+       info->se_info.se_active = false;
+
+       complete(&info->se_info.req_completion);
+}
+
+/*
+ * Returns:
+ * <= 0: driver handled the event, skb consumed
+ *    1: driver does not handle the event, please do standard processing
+ */
+int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
+                               u8 event, struct sk_buff *skb)
+{
+       int r = 0;
+       struct device *dev = &hdev->ndev->dev;
+       struct nfc_evt_transaction *transaction;
+
+       pr_debug("connectivity gate event: %x\n", event);
+
+       switch (event) {
+       case ST21NFCA_EVT_CONNECTIVITY:
+               break;
+       case ST21NFCA_EVT_TRANSACTION:
+               if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
+                   skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+                       return -EPROTO;
+
+               transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+                                                  skb->len - 2, GFP_KERNEL);
+
+               transaction->aid_len = skb->data[1];
+               memcpy(transaction->aid, &skb->data[2], skb->data[1]);
+
+               if (skb->data[transaction->aid_len + 2] !=
+                   NFC_EVT_TRANSACTION_PARAMS_TAG)
+                       return -EPROTO;
+
+               transaction->params_len = skb->data[transaction->aid_len + 3];
+               memcpy(transaction->params, skb->data +
+                      transaction->aid_len + 4, transaction->params_len);
+
+               r = nfc_se_transaction(hdev->ndev, host, transaction);
+               break;
+       default:
+               return 1;
+       }
+       kfree_skb(skb);
+       return r;
+}
+EXPORT_SYMBOL(st21nfca_connectivity_event_received);
+
+int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
+                                       u8 event, struct sk_buff *skb)
+{
+       int r = 0;
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       pr_debug("apdu reader gate event: %x\n", event);
+
+       switch (event) {
+       case ST21NFCA_EVT_TRANSMIT_DATA:
+               del_timer_sync(&info->se_info.bwi_timer);
+               info->se_info.bwi_active = false;
+               r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+                               ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
+               if (r < 0)
+                       goto exit;
+
+               info->se_info.cb(info->se_info.cb_context,
+                       skb->data, skb->len, 0);
+               break;
+       case ST21NFCA_EVT_WTX_REQUEST:
+               mod_timer(&info->se_info.bwi_timer, jiffies +
+                               msecs_to_jiffies(info->se_info.wt_timeout));
+               break;
+       }
+
+exit:
+       kfree_skb(skb);
+       return r;
+}
+EXPORT_SYMBOL(st21nfca_apdu_reader_event_received);
+
+void st21nfca_se_init(struct nfc_hci_dev *hdev)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       init_completion(&info->se_info.req_completion);
+       /* initialize timers */
+       init_timer(&info->se_info.bwi_timer);
+       info->se_info.bwi_timer.data = (unsigned long)info;
+       info->se_info.bwi_timer.function = st21nfca_se_wt_timeout;
+       info->se_info.bwi_active = false;
+
+       init_timer(&info->se_info.se_active_timer);
+       info->se_info.se_active_timer.data = (unsigned long)info;
+       info->se_info.se_active_timer.function = st21nfca_se_activation_timeout;
+       info->se_info.se_active = false;
+
+       info->se_info.count_pipes = 0;
+       info->se_info.expected_pipes = 0;
+
+       info->se_info.xch_error = false;
+
+       info->se_info.wt_timeout =
+                       ST21NFCA_BWI_TO_TIMEOUT(ST21NFCA_ATR_DEFAULT_BWI);
+}
+EXPORT_SYMBOL(st21nfca_se_init);
+
+void st21nfca_se_deinit(struct nfc_hci_dev *hdev)
+{
+       struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       if (info->se_info.bwi_active)
+               del_timer_sync(&info->se_info.bwi_timer);
+       if (info->se_info.se_active)
+               del_timer_sync(&info->se_info.se_active_timer);
+
+       info->se_info.bwi_active = false;
+       info->se_info.se_active = false;
+}
+EXPORT_SYMBOL(st21nfca_se_deinit);
diff --git a/drivers/nfc/st21nfca/st21nfca_se.h b/drivers/nfc/st21nfca/st21nfca_se.h
new file mode 100644 (file)
index 0000000..b172cfc
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ST21NFCA_SE_H
+#define __ST21NFCA_SE_H
+
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+
+/*
+ * ref ISO7816-3 chap 8.1. the initial character TS is followed by a
+ * sequence of at most 32 characters.
+ */
+#define ST21NFCA_ESE_MAX_LENGTH                        33
+#define ST21NFCA_ESE_HOST_ID                   0xc0
+
+struct st21nfca_se_info {
+       u8 atr[ST21NFCA_ESE_MAX_LENGTH];
+       struct completion req_completion;
+
+       struct timer_list bwi_timer;
+       int wt_timeout; /* in msecs */
+       bool bwi_active;
+
+       struct timer_list se_active_timer;
+       bool se_active;
+       int expected_pipes;
+       int count_pipes;
+
+       bool xch_error;
+
+       se_io_cb_t cb;
+       void *cb_context;
+};
+
+int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
+                                       u8 event, struct sk_buff *skb);
+int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
+                                       u8 event, struct sk_buff *skb);
+
+int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev);
+int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx);
+int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx);
+int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
+               u8 *apdu, size_t apdu_length,
+               se_io_cb_t cb, void *cb_context);
+
+void st21nfca_se_init(struct nfc_hci_dev *hdev);
+void st21nfca_se_deinit(struct nfc_hci_dev *hdev);
+#endif /* __ST21NFCA_SE_H */
index f4d835dd15f24230afaf995496146b8a520b0cfb..ce659a9e5a1a36beefd4e362aceae9265337826c 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for ST21NFCB NCI based NFC driver
 #
 
-st21nfcb_nci-objs = ndlc.o st21nfcb.o
+st21nfcb_nci-objs = ndlc.o st21nfcb.o st21nfcb_se.o
 obj-$(CONFIG_NFC_ST21NFCB)     += st21nfcb_nci.o
 
 st21nfcb_i2c-objs = i2c.o
index 01ba865863ee93dfb6843ae26d182efa288b6284..eb886932d97278cfa044fb66a20dea49ffcaca6b 100644 (file)
@@ -199,7 +199,7 @@ static irqreturn_t st21nfcb_nci_irq_thread_fn(int irq, void *phy_id)
        struct sk_buff *skb = NULL;
        int r;
 
-       if (!phy || irq != phy->i2c_dev->irq) {
+       if (!phy || !phy->ndlc || irq != phy->i2c_dev->irq) {
                WARN_ON_ONCE(1);
                return IRQ_NONE;
        }
@@ -343,18 +343,22 @@ static int st21nfcb_nci_i2c_probe(struct i2c_client *client,
                return -ENODEV;
        }
 
+       r = ndlc_probe(phy, &i2c_phy_ops, &client->dev,
+                       ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
+                       &phy->ndlc);
+       if (r < 0) {
+               nfc_err(&client->dev, "Unable to register ndlc layer\n");
+               return r;
+       }
+
        r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
                                st21nfcb_nci_irq_thread_fn,
                                phy->irq_polarity | IRQF_ONESHOT,
                                ST21NFCB_NCI_DRIVER_NAME, phy);
-       if (r < 0) {
+       if (r < 0)
                nfc_err(&client->dev, "Unable to register IRQ handler\n");
-               return r;
-       }
 
-       return ndlc_probe(phy, &i2c_phy_ops, &client->dev,
-                       ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
-                       &phy->ndlc);
+       return r;
 }
 
 static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
@@ -373,6 +377,7 @@ static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
 
 #ifdef CONFIG_OF
 static const struct of_device_id of_st21nfcb_i2c_match[] = {
+       { .compatible = "st,st21nfcb-i2c", },
        { .compatible = "st,st21nfcb_i2c", },
        {}
 };
index bac50e805f1d565ba17d4c0056fc8a2f2847ae90..5fbf59d2138c1b0a0c723550905d37931a89bcbe 100644 (file)
@@ -138,7 +138,7 @@ static void llt_ndlc_requeue_data_pending(struct llt_ndlc *ndlc)
                default:
                        pr_err("UNKNOWN Packet Control Byte=%d\n", pcb);
                        kfree_skb(skb);
-                       break;
+                       continue;
                }
                skb_queue_head(&ndlc->send_q, skb);
        }
@@ -297,6 +297,5 @@ void ndlc_remove(struct llt_ndlc *ndlc)
        skb_queue_purge(&ndlc->send_q);
 
        st21nfcb_nci_remove(ndlc->ndev);
-       kfree(ndlc);
 }
 EXPORT_SYMBOL(ndlc_remove);
index ea63d5877831bf40b3dafbc059ef6651bf3c9a28..ca9871ab3fb3c7040188d6ee8c6404bb26d876e5 100644 (file)
@@ -22,6 +22,7 @@
 #include <net/nfc/nci_core.h>
 
 #include "st21nfcb.h"
+#include "st21nfcb_se.h"
 
 #define DRIVER_DESC "NCI NFC driver for ST21NFCB"
 
@@ -78,6 +79,13 @@ static struct nci_ops st21nfcb_nci_ops = {
        .close = st21nfcb_nci_close,
        .send = st21nfcb_nci_send,
        .get_rfprotocol = st21nfcb_nci_get_rfprotocol,
+       .discover_se = st21nfcb_nci_discover_se,
+       .enable_se = st21nfcb_nci_enable_se,
+       .disable_se = st21nfcb_nci_disable_se,
+       .se_io = st21nfcb_nci_se_io,
+       .hci_load_session = st21nfcb_hci_load_session,
+       .hci_event_received = st21nfcb_hci_event_received,
+       .hci_cmd_received = st21nfcb_hci_cmd_received,
 };
 
 int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
@@ -114,9 +122,10 @@ int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
        if (r) {
                pr_err("Cannot register nfc device to nci core\n");
                nci_free_device(ndlc->ndev);
+               return r;
        }
 
-       return r;
+       return st21nfcb_se_init(ndlc->ndev);
 }
 EXPORT_SYMBOL_GPL(st21nfcb_nci_probe);
 
index ea58a56ad794b792865aaaa59f37d801e8c83bb1..5ef8a58c9839dd542a7ba85911c5f931ce7ee616 100644 (file)
@@ -19,6 +19,7 @@
 #ifndef __LOCAL_ST21NFCB_H_
 #define __LOCAL_ST21NFCB_H_
 
+#include "st21nfcb_se.h"
 #include "ndlc.h"
 
 /* Define private flags: */
@@ -27,6 +28,7 @@
 struct st21nfcb_nci_info {
        struct llt_ndlc *ndlc;
        unsigned long flags;
+       struct st21nfcb_se_info se_info;
 };
 
 void st21nfcb_nci_remove(struct nci_dev *ndev);
diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.c b/drivers/nfc/st21nfcb/st21nfcb_se.c
new file mode 100644 (file)
index 0000000..7c82e9d
--- /dev/null
@@ -0,0 +1,707 @@
+/*
+ * NCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/delay.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+
+#include "st21nfcb.h"
+#include "st21nfcb_se.h"
+
+struct st21nfcb_pipe_info {
+       u8 pipe_state;
+       u8 src_host_id;
+       u8 src_gate_id;
+       u8 dst_host_id;
+       u8 dst_gate_id;
+} __packed;
+
+/* Hosts */
+#define ST21NFCB_HOST_CONTROLLER_ID     0x00
+#define ST21NFCB_TERMINAL_HOST_ID       0x01
+#define ST21NFCB_UICC_HOST_ID           0x02
+#define ST21NFCB_ESE_HOST_ID            0xc0
+
+/* Gates */
+#define ST21NFCB_DEVICE_MGNT_GATE       0x01
+#define ST21NFCB_APDU_READER_GATE       0xf0
+#define ST21NFCB_CONNECTIVITY_GATE      0x41
+
+/* Pipes */
+#define ST21NFCB_DEVICE_MGNT_PIPE               0x02
+
+/* Connectivity pipe only */
+#define ST21NFCB_SE_COUNT_PIPE_UICC             0x01
+/* Connectivity + APDU Reader pipe */
+#define ST21NFCB_SE_COUNT_PIPE_EMBEDDED         0x02
+
+#define ST21NFCB_SE_TO_HOT_PLUG                        1000 /* msecs */
+#define ST21NFCB_SE_TO_PIPES                   2000
+
+#define ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(x)   (x->data[0] & 0x80)
+
+#define NCI_HCI_APDU_PARAM_ATR                     0x01
+#define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY       0x01
+#define NCI_HCI_ADMIN_PARAM_WHITELIST              0x03
+#define NCI_HCI_ADMIN_PARAM_HOST_LIST              0x04
+
+#define ST21NFCB_EVT_SE_HARD_RESET             0x20
+#define ST21NFCB_EVT_TRANSMIT_DATA             0x10
+#define ST21NFCB_EVT_WTX_REQUEST               0x11
+#define ST21NFCB_EVT_SE_SOFT_RESET             0x11
+#define ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER   0x21
+#define ST21NFCB_EVT_HOT_PLUG                  0x03
+
+#define ST21NFCB_SE_MODE_OFF                    0x00
+#define ST21NFCB_SE_MODE_ON                     0x01
+
+#define ST21NFCB_EVT_CONNECTIVITY       0x10
+#define ST21NFCB_EVT_TRANSACTION        0x12
+
+#define ST21NFCB_DM_GETINFO             0x13
+#define ST21NFCB_DM_GETINFO_PIPE_LIST   0x02
+#define ST21NFCB_DM_GETINFO_PIPE_INFO   0x01
+#define ST21NFCB_DM_PIPE_CREATED        0x02
+#define ST21NFCB_DM_PIPE_OPEN           0x04
+#define ST21NFCB_DM_RF_ACTIVE           0x80
+#define ST21NFCB_DM_DISCONNECT          0x30
+
+#define ST21NFCB_DM_IS_PIPE_OPEN(p) \
+       ((p & 0x0f) == (ST21NFCB_DM_PIPE_CREATED | ST21NFCB_DM_PIPE_OPEN))
+
+#define ST21NFCB_ATR_DEFAULT_BWI        0x04
+
+/*
+ * WT = 2^BWI/10[s], convert into msecs and add a secure
+ * room by increasing by 2 this timeout
+ */
+#define ST21NFCB_BWI_TO_TIMEOUT(x)      ((1 << x) * 200)
+#define ST21NFCB_ATR_GET_Y_FROM_TD(x)   (x >> 4)
+
+/* If TA is present bit 0 is set */
+#define ST21NFCB_ATR_TA_PRESENT(x) (x & 0x01)
+/* If TB is present bit 1 is set */
+#define ST21NFCB_ATR_TB_PRESENT(x) (x & 0x02)
+
+#define ST21NFCB_NUM_DEVICES           256
+
+static DECLARE_BITMAP(dev_mask, ST21NFCB_NUM_DEVICES);
+
+/* Here are the mandatory pipe for st21nfcb */
+static struct nci_hci_gate st21nfcb_gates[] = {
+       {NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PIPE,
+                                       ST21NFCB_HOST_CONTROLLER_ID},
+       {NCI_HCI_LINK_MGMT_GATE, NCI_HCI_LINK_MGMT_PIPE,
+                                       ST21NFCB_HOST_CONTROLLER_ID},
+       {ST21NFCB_DEVICE_MGNT_GATE, ST21NFCB_DEVICE_MGNT_PIPE,
+                                       ST21NFCB_HOST_CONTROLLER_ID},
+
+       /* Secure element pipes are created by secure element host */
+       {ST21NFCB_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
+                                       ST21NFCB_HOST_CONTROLLER_ID},
+       {ST21NFCB_APDU_READER_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
+                                       ST21NFCB_HOST_CONTROLLER_ID},
+};
+
+static u8 st21nfcb_se_get_bwi(struct nci_dev *ndev)
+{
+       int i;
+       u8 td;
+       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+       /* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */
+       for (i = 1; i < ST21NFCB_ESE_MAX_LENGTH; i++) {
+               td = ST21NFCB_ATR_GET_Y_FROM_TD(info->se_info.atr[i]);
+               if (ST21NFCB_ATR_TA_PRESENT(td))
+                       i++;
+               if (ST21NFCB_ATR_TB_PRESENT(td)) {
+                       i++;
+                       return info->se_info.atr[i] >> 4;
+               }
+       }
+       return ST21NFCB_ATR_DEFAULT_BWI;
+}
+
+static void st21nfcb_se_get_atr(struct nci_dev *ndev)
+{
+       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+       int r;
+       struct sk_buff *skb;
+
+       r = nci_hci_get_param(ndev, ST21NFCB_APDU_READER_GATE,
+                               NCI_HCI_APDU_PARAM_ATR, &skb);
+       if (r < 0)
+               return;
+
+       if (skb->len <= ST21NFCB_ESE_MAX_LENGTH) {
+               memcpy(info->se_info.atr, skb->data, skb->len);
+
+               info->se_info.wt_timeout =
+                       ST21NFCB_BWI_TO_TIMEOUT(st21nfcb_se_get_bwi(ndev));
+       }
+       kfree_skb(skb);
+}
+
+int st21nfcb_hci_load_session(struct nci_dev *ndev)
+{
+       int i, j, r;
+       struct sk_buff *skb_pipe_list, *skb_pipe_info;
+       struct st21nfcb_pipe_info *dm_pipe_info;
+       u8 pipe_list[] = { ST21NFCB_DM_GETINFO_PIPE_LIST,
+                       ST21NFCB_TERMINAL_HOST_ID};
+       u8 pipe_info[] = { ST21NFCB_DM_GETINFO_PIPE_INFO,
+                       ST21NFCB_TERMINAL_HOST_ID, 0};
+
+       /* On ST21NFCB device pipes number are dynamics
+        * If pipes are already created, hci_dev_up will fail.
+        * Doing a clear all pipe is a bad idea because:
+        * - It does useless EEPROM cycling
+        * - It might cause issue for secure elements support
+        * (such as removing connectivity or APDU reader pipe)
+        * A better approach on ST21NFCB is to:
+        * - get a pipe list for each host.
+        * (eg: ST21NFCB_HOST_CONTROLLER_ID for now).
+        * (TODO Later on UICC HOST and eSE HOST)
+        * - get pipe information
+        * - match retrieved pipe list in st21nfcb_gates
+        * ST21NFCB_DEVICE_MGNT_GATE is a proprietary gate
+        * with ST21NFCB_DEVICE_MGNT_PIPE.
+        * Pipe can be closed and need to be open.
+        */
+       r = nci_hci_connect_gate(ndev, ST21NFCB_HOST_CONTROLLER_ID,
+                               ST21NFCB_DEVICE_MGNT_GATE,
+                               ST21NFCB_DEVICE_MGNT_PIPE);
+       if (r < 0)
+               goto free_info;
+
+       /* Get pipe list */
+       r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE,
+                       ST21NFCB_DM_GETINFO, pipe_list, sizeof(pipe_list),
+                       &skb_pipe_list);
+       if (r < 0)
+               goto free_info;
+
+       /* Complete the existing gate_pipe table */
+       for (i = 0; i < skb_pipe_list->len; i++) {
+               pipe_info[2] = skb_pipe_list->data[i];
+               r = nci_hci_send_cmd(ndev, ST21NFCB_DEVICE_MGNT_GATE,
+                                       ST21NFCB_DM_GETINFO, pipe_info,
+                                       sizeof(pipe_info), &skb_pipe_info);
+
+               if (r)
+                       continue;
+
+               /*
+                * Match pipe ID and gate ID
+                * Output format from ST21NFC_DM_GETINFO is:
+                * - pipe state (1byte)
+                * - source hid (1byte)
+                * - source gid (1byte)
+                * - destination hid (1byte)
+                * - destination gid (1byte)
+                */
+               dm_pipe_info = (struct st21nfcb_pipe_info *)skb_pipe_info->data;
+               if (dm_pipe_info->dst_gate_id == ST21NFCB_APDU_READER_GATE &&
+                   dm_pipe_info->src_host_id != ST21NFCB_ESE_HOST_ID) {
+                       pr_err("Unexpected apdu_reader pipe on host %x\n",
+                              dm_pipe_info->src_host_id);
+                       continue;
+               }
+
+               for (j = 0; (j < ARRAY_SIZE(st21nfcb_gates)) &&
+                    (st21nfcb_gates[j].gate != dm_pipe_info->dst_gate_id); j++)
+                       ;
+
+               if (j < ARRAY_SIZE(st21nfcb_gates) &&
+                   st21nfcb_gates[j].gate == dm_pipe_info->dst_gate_id &&
+                   ST21NFCB_DM_IS_PIPE_OPEN(dm_pipe_info->pipe_state)) {
+                       st21nfcb_gates[j].pipe = pipe_info[2];
+
+                       ndev->hci_dev->gate2pipe[st21nfcb_gates[j].gate] =
+                                               st21nfcb_gates[j].pipe;
+                       ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].gate =
+                                               st21nfcb_gates[j].gate;
+                       ndev->hci_dev->pipes[st21nfcb_gates[j].pipe].host =
+                                               dm_pipe_info->src_host_id;
+               }
+       }
+
+       memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates,
+              sizeof(st21nfcb_gates));
+
+free_info:
+       kfree_skb(skb_pipe_info);
+       kfree_skb(skb_pipe_list);
+       return r;
+}
+EXPORT_SYMBOL_GPL(st21nfcb_hci_load_session);
+
+static void st21nfcb_hci_admin_event_received(struct nci_dev *ndev,
+                                             u8 event, struct sk_buff *skb)
+{
+       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+       switch (event) {
+       case ST21NFCB_EVT_HOT_PLUG:
+               if (info->se_info.se_active) {
+                       if (!ST21NFCB_EVT_HOT_PLUG_IS_INHIBITED(skb)) {
+                               del_timer_sync(&info->se_info.se_active_timer);
+                               info->se_info.se_active = false;
+                               complete(&info->se_info.req_completion);
+                       } else {
+                               mod_timer(&info->se_info.se_active_timer,
+                                     jiffies +
+                                     msecs_to_jiffies(ST21NFCB_SE_TO_PIPES));
+                       }
+               }
+       break;
+       }
+}
+
+static int st21nfcb_hci_apdu_reader_event_received(struct nci_dev *ndev,
+                                                  u8 event,
+                                                  struct sk_buff *skb)
+{
+       int r = 0;
+       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+       pr_debug("apdu reader gate event: %x\n", event);
+
+       switch (event) {
+       case ST21NFCB_EVT_TRANSMIT_DATA:
+               del_timer_sync(&info->se_info.bwi_timer);
+               info->se_info.bwi_active = false;
+               info->se_info.cb(info->se_info.cb_context,
+                                skb->data, skb->len, 0);
+       break;
+       case ST21NFCB_EVT_WTX_REQUEST:
+               mod_timer(&info->se_info.bwi_timer, jiffies +
+                         msecs_to_jiffies(info->se_info.wt_timeout));
+       break;
+       }
+
+       kfree_skb(skb);
+       return r;
+}
+
+/*
+ * Returns:
+ * <= 0: driver handled the event, skb consumed
+ *    1: driver does not handle the event, please do standard processing
+ */
+static int st21nfcb_hci_connectivity_event_received(struct nci_dev *ndev,
+                                               u8 host, u8 event,
+                                               struct sk_buff *skb)
+{
+       int r = 0;
+       struct device *dev = &ndev->nfc_dev->dev;
+       struct nfc_evt_transaction *transaction;
+
+       pr_debug("connectivity gate event: %x\n", event);
+
+       switch (event) {
+       case ST21NFCB_EVT_CONNECTIVITY:
+
+       break;
+       case ST21NFCB_EVT_TRANSACTION:
+               if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
+                   skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+                       return -EPROTO;
+
+               transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+                                           skb->len - 2, GFP_KERNEL);
+
+               transaction->aid_len = skb->data[1];
+               memcpy(transaction->aid, &skb->data[2], skb->data[1]);
+
+               if (skb->data[transaction->aid_len + 2] !=
+                   NFC_EVT_TRANSACTION_PARAMS_TAG)
+                       return -EPROTO;
+
+               transaction->params_len = skb->data[transaction->aid_len + 3];
+               memcpy(transaction->params, skb->data +
+                      transaction->aid_len + 4, transaction->params_len);
+
+               r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
+       default:
+               return 1;
+       }
+       kfree_skb(skb);
+       return r;
+}
+
+void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe,
+                                u8 event, struct sk_buff *skb)
+{
+       u8 gate = ndev->hci_dev->pipes[pipe].gate;
+       u8 host = ndev->hci_dev->pipes[pipe].host;
+
+       switch (gate) {
+       case NCI_HCI_ADMIN_GATE:
+               st21nfcb_hci_admin_event_received(ndev, event, skb);
+       break;
+       case ST21NFCB_APDU_READER_GATE:
+               st21nfcb_hci_apdu_reader_event_received(ndev, event, skb);
+       break;
+       case ST21NFCB_CONNECTIVITY_GATE:
+               st21nfcb_hci_connectivity_event_received(ndev, host, event,
+                                                        skb);
+       break;
+       }
+}
+EXPORT_SYMBOL_GPL(st21nfcb_hci_event_received);
+
+
+void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
+                              struct sk_buff *skb)
+{
+       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+       u8 gate = ndev->hci_dev->pipes[pipe].gate;
+
+       pr_debug("cmd: %x\n", cmd);
+
+       switch (cmd) {
+       case NCI_HCI_ANY_OPEN_PIPE:
+               if (gate != ST21NFCB_APDU_READER_GATE &&
+                   ndev->hci_dev->pipes[pipe].host != ST21NFCB_UICC_HOST_ID)
+                       ndev->hci_dev->count_pipes++;
+
+               if (ndev->hci_dev->count_pipes ==
+                   ndev->hci_dev->expected_pipes) {
+                       del_timer_sync(&info->se_info.se_active_timer);
+                       info->se_info.se_active = false;
+                       ndev->hci_dev->count_pipes = 0;
+                       complete(&info->se_info.req_completion);
+               }
+       break;
+       }
+}
+EXPORT_SYMBOL_GPL(st21nfcb_hci_cmd_received);
+
+/*
+ * Remarks: On some early st21nfcb firmware, nci_nfcee_mode_set(0)
+ * is rejected
+ */
+static int st21nfcb_nci_control_se(struct nci_dev *ndev, u8 se_idx,
+                                  u8 state)
+{
+       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+       int r;
+       struct sk_buff *sk_host_list;
+       u8 host_id;
+
+       switch (se_idx) {
+       case ST21NFCB_UICC_HOST_ID:
+               ndev->hci_dev->count_pipes = 0;
+               ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_UICC;
+               break;
+       case ST21NFCB_ESE_HOST_ID:
+               ndev->hci_dev->count_pipes = 0;
+               ndev->hci_dev->expected_pipes = ST21NFCB_SE_COUNT_PIPE_EMBEDDED;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /*
+        * Wait for an EVT_HOT_PLUG in order to
+        * retrieve a relevant host list.
+        */
+       reinit_completion(&info->se_info.req_completion);
+       r = nci_nfcee_mode_set(ndev, se_idx, NCI_NFCEE_ENABLE);
+       if (r != NCI_STATUS_OK)
+               return r;
+
+       mod_timer(&info->se_info.se_active_timer, jiffies +
+               msecs_to_jiffies(ST21NFCB_SE_TO_HOT_PLUG));
+       info->se_info.se_active = true;
+
+       /* Ignore return value and check in any case the host_list */
+       wait_for_completion_interruptible(&info->se_info.req_completion);
+
+       /* There might be some "collision" after receiving a HOT_PLUG event
+        * This may cause the CLF to not answer to the next hci command.
+        * There is no possible synchronization to prevent this.
+        * Adding a small delay is the only way to solve the issue.
+        */
+       usleep_range(3000, 5000);
+
+       r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE,
+                       NCI_HCI_ADMIN_PARAM_HOST_LIST, &sk_host_list);
+       if (r != NCI_HCI_ANY_OK)
+               return r;
+
+       host_id = sk_host_list->data[sk_host_list->len - 1];
+       kfree_skb(sk_host_list);
+       if (state == ST21NFCB_SE_MODE_ON && host_id == se_idx)
+               return se_idx;
+       else if (state == ST21NFCB_SE_MODE_OFF && host_id != se_idx)
+               return se_idx;
+
+       return -1;
+}
+
+int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx)
+{
+       int r;
+
+       pr_debug("st21nfcb_nci_disable_se\n");
+
+       if (se_idx == NFC_SE_EMBEDDED) {
+               r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
+                               ST21NFCB_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
+               if (r < 0)
+                       return r;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(st21nfcb_nci_disable_se);
+
+int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx)
+{
+       int r;
+
+       pr_debug("st21nfcb_nci_enable_se\n");
+
+       if (se_idx == ST21NFCB_HCI_HOST_ID_ESE) {
+               r = nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
+                               ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0);
+               if (r < 0)
+                       return r;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(st21nfcb_nci_enable_se);
+
+static int st21nfcb_hci_network_init(struct nci_dev *ndev)
+{
+       struct core_conn_create_dest_spec_params *dest_params;
+       struct dest_spec_params spec_params;
+       struct nci_conn_info    *conn_info;
+       int r, dev_num;
+
+       dest_params =
+               kzalloc(sizeof(struct core_conn_create_dest_spec_params) +
+                       sizeof(struct dest_spec_params), GFP_KERNEL);
+       if (dest_params == NULL) {
+               r = -ENOMEM;
+               goto exit;
+       }
+
+       dest_params->type = NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE;
+       dest_params->length = sizeof(struct dest_spec_params);
+       spec_params.id = ndev->hci_dev->nfcee_id;
+       spec_params.protocol = NCI_NFCEE_INTERFACE_HCI_ACCESS;
+       memcpy(dest_params->value, &spec_params, sizeof(struct dest_spec_params));
+       r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCEE, 1,
+                                sizeof(struct core_conn_create_dest_spec_params) +
+                                sizeof(struct dest_spec_params),
+                                dest_params);
+       if (r != NCI_STATUS_OK)
+               goto free_dest_params;
+
+       conn_info = ndev->hci_dev->conn_info;
+       if (!conn_info)
+               goto free_dest_params;
+
+       memcpy(ndev->hci_dev->init_data.gates, st21nfcb_gates,
+              sizeof(st21nfcb_gates));
+
+       /*
+        * Session id must include the driver name + i2c bus addr
+        * persistent info to discriminate 2 identical chips
+        */
+       dev_num = find_first_zero_bit(dev_mask, ST21NFCB_NUM_DEVICES);
+       if (dev_num >= ST21NFCB_NUM_DEVICES) {
+               r = -ENODEV;
+               goto free_dest_params;
+       }
+
+       scnprintf(ndev->hci_dev->init_data.session_id,
+                 sizeof(ndev->hci_dev->init_data.session_id),
+                 "%s%2x", "ST21BH", dev_num);
+
+       r = nci_hci_dev_session_init(ndev);
+       if (r != NCI_HCI_ANY_OK)
+               goto exit;
+
+       r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
+                              NCI_NFCEE_ENABLE);
+       if (r != NCI_STATUS_OK)
+               goto exit;
+
+       return 0;
+
+free_dest_params:
+       kfree(dest_params);
+
+exit:
+       return r;
+}
+
+int st21nfcb_nci_discover_se(struct nci_dev *ndev)
+{
+       u8 param[2];
+       int r;
+       int se_count = 0;
+
+       pr_debug("st21nfcb_nci_discover_se\n");
+
+       r = st21nfcb_hci_network_init(ndev);
+       if (r != 0)
+               return r;
+
+       param[0] = ST21NFCB_UICC_HOST_ID;
+       param[1] = ST21NFCB_HCI_HOST_ID_ESE;
+       r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
+                               NCI_HCI_ADMIN_PARAM_WHITELIST,
+                               param, sizeof(param));
+       if (r != NCI_HCI_ANY_OK)
+               return r;
+
+       r = st21nfcb_nci_control_se(ndev, ST21NFCB_UICC_HOST_ID,
+                               ST21NFCB_SE_MODE_ON);
+       if (r == ST21NFCB_UICC_HOST_ID) {
+               nfc_add_se(ndev->nfc_dev, ST21NFCB_UICC_HOST_ID, NFC_SE_UICC);
+               se_count++;
+       }
+
+       /* Try to enable eSE in order to check availability */
+       r = st21nfcb_nci_control_se(ndev, ST21NFCB_HCI_HOST_ID_ESE,
+                               ST21NFCB_SE_MODE_ON);
+       if (r == ST21NFCB_HCI_HOST_ID_ESE) {
+               nfc_add_se(ndev->nfc_dev, ST21NFCB_HCI_HOST_ID_ESE,
+                          NFC_SE_EMBEDDED);
+               se_count++;
+               st21nfcb_se_get_atr(ndev);
+       }
+
+       return !se_count;
+}
+EXPORT_SYMBOL_GPL(st21nfcb_nci_discover_se);
+
+int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+                      u8 *apdu, size_t apdu_length,
+                      se_io_cb_t cb, void *cb_context)
+{
+       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+       pr_debug("\n");
+
+       switch (se_idx) {
+       case ST21NFCB_HCI_HOST_ID_ESE:
+               info->se_info.cb = cb;
+               info->se_info.cb_context = cb_context;
+               mod_timer(&info->se_info.bwi_timer, jiffies +
+                         msecs_to_jiffies(info->se_info.wt_timeout));
+               info->se_info.bwi_active = true;
+               return nci_hci_send_event(ndev, ST21NFCB_APDU_READER_GATE,
+                                       ST21NFCB_EVT_TRANSMIT_DATA, apdu,
+                                       apdu_length);
+       default:
+               return -ENODEV;
+       }
+}
+EXPORT_SYMBOL(st21nfcb_nci_se_io);
+
+static void st21nfcb_se_wt_timeout(unsigned long data)
+{
+       /*
+        * No answer from the secure element
+        * within the defined timeout.
+        * Let's send a reset request as recovery procedure.
+        * According to the situation, we first try to send a software reset
+        * to the secure element. If the next command is still not
+        * answering in time, we send to the CLF a secure element hardware
+        * reset request.
+        */
+       /* hardware reset managed through VCC_UICC_OUT power supply */
+       u8 param = 0x01;
+       struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data;
+
+       pr_debug("\n");
+
+       info->se_info.bwi_active = false;
+
+       if (!info->se_info.xch_error) {
+               info->se_info.xch_error = true;
+               nci_hci_send_event(info->ndlc->ndev, ST21NFCB_APDU_READER_GATE,
+                               ST21NFCB_EVT_SE_SOFT_RESET, NULL, 0);
+       } else {
+               info->se_info.xch_error = false;
+               nci_hci_send_event(info->ndlc->ndev, ST21NFCB_DEVICE_MGNT_GATE,
+                               ST21NFCB_EVT_SE_HARD_RESET, &param, 1);
+       }
+       info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
+}
+
+static void st21nfcb_se_activation_timeout(unsigned long data)
+{
+       struct st21nfcb_nci_info *info = (struct st21nfcb_nci_info *) data;
+
+       pr_debug("\n");
+
+       info->se_info.se_active = false;
+
+       complete(&info->se_info.req_completion);
+}
+
+int st21nfcb_se_init(struct nci_dev *ndev)
+{
+       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+       init_completion(&info->se_info.req_completion);
+       /* initialize timers */
+       init_timer(&info->se_info.bwi_timer);
+       info->se_info.bwi_timer.data = (unsigned long)info;
+       info->se_info.bwi_timer.function = st21nfcb_se_wt_timeout;
+       info->se_info.bwi_active = false;
+
+       init_timer(&info->se_info.se_active_timer);
+       info->se_info.se_active_timer.data = (unsigned long)info;
+       info->se_info.se_active_timer.function =
+                       st21nfcb_se_activation_timeout;
+       info->se_info.se_active = false;
+
+       info->se_info.xch_error = false;
+
+       info->se_info.wt_timeout =
+               ST21NFCB_BWI_TO_TIMEOUT(ST21NFCB_ATR_DEFAULT_BWI);
+
+       return 0;
+}
+EXPORT_SYMBOL(st21nfcb_se_init);
+
+void st21nfcb_se_deinit(struct nci_dev *ndev)
+{
+       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+
+       if (info->se_info.bwi_active)
+               del_timer_sync(&info->se_info.bwi_timer);
+       if (info->se_info.se_active)
+               del_timer_sync(&info->se_info.se_active_timer);
+
+       info->se_info.se_active = false;
+       info->se_info.bwi_active = false;
+}
+EXPORT_SYMBOL(st21nfcb_se_deinit);
+
diff --git a/drivers/nfc/st21nfcb/st21nfcb_se.h b/drivers/nfc/st21nfcb/st21nfcb_se.h
new file mode 100644 (file)
index 0000000..52a3238
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * NCI based Driver for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LOCAL_ST21NFCB_SE_H_
+#define __LOCAL_ST21NFCB_SE_H_
+
+/*
+ * ref ISO7816-3 chap 8.1. the initial character TS is followed by a
+ * sequence of at most 32 characters.
+ */
+#define ST21NFCB_ESE_MAX_LENGTH                33
+#define ST21NFCB_HCI_HOST_ID_ESE       0xc0
+
+struct st21nfcb_se_info {
+       u8 atr[ST21NFCB_ESE_MAX_LENGTH];
+       struct completion req_completion;
+
+       struct timer_list bwi_timer;
+       int wt_timeout; /* in msecs */
+       bool bwi_active;
+
+       struct timer_list se_active_timer;
+       bool se_active;
+
+       bool xch_error;
+
+       se_io_cb_t cb;
+       void *cb_context;
+};
+
+int st21nfcb_se_init(struct nci_dev *ndev);
+void st21nfcb_se_deinit(struct nci_dev *ndev);
+
+int st21nfcb_nci_discover_se(struct nci_dev *ndev);
+int st21nfcb_nci_enable_se(struct nci_dev *ndev, u32 se_idx);
+int st21nfcb_nci_disable_se(struct nci_dev *ndev, u32 se_idx);
+int st21nfcb_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+                                       u8 *apdu, size_t apdu_length,
+                                       se_io_cb_t cb, void *cb_context);
+int st21nfcb_hci_load_session(struct nci_dev *ndev);
+void st21nfcb_hci_event_received(struct nci_dev *ndev, u8 pipe,
+                                u8 event, struct sk_buff *skb);
+void st21nfcb_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
+                              struct sk_buff *skb);
+
+
+#endif /* __LOCAL_ST21NFCB_NCI_H_ */
index ea63fbd228ed684234722c6f35db36060559accf..352b4f28f82cd729fb842a210e7460ff9f123833 100644 (file)
@@ -114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
                ret = of_overlay_apply_one(ov, tchild, child);
                if (ret)
                        return ret;
-
-               /* The properties are already copied, now do the child nodes */
-               for_each_child_of_node(child, grandchild) {
-                       ret = of_overlay_apply_single_device_node(ov, tchild, grandchild);
-                       if (ret) {
-                               pr_err("%s: Failed to apply single node @%s/%s\n",
-                                       __func__, tchild->full_name,
-                                       grandchild->name);
-                               return ret;
-                       }
-               }
        }
 
        return ret;
index 5b33c6a2180752888a36e7a15f81b81f29c8477a..b0d50d70a8a1d4772cae2075223959406059ea10 100644 (file)
@@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev)
                size = dev->coherent_dma_mask;
        } else {
                offset = PFN_DOWN(paddr - dma_addr);
-               dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
+               dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
        }
        dev->dma_pfn_offset = offset;
 
@@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb,
                if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
                        return NOTIFY_OK;       /* not for us */
 
+               /* already populated? (driver using of_populate manually) */
+               if (of_node_check_flag(rd->dn, OF_POPULATED))
+                       return NOTIFY_OK;
+
                /* pdev_parent may be NULL when no bus platform device */
                pdev_parent = of_find_device_by_node(rd->dn->parent);
                pdev = of_platform_device_create(rd->dn, NULL,
@@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb,
                break;
 
        case OF_RECONFIG_CHANGE_REMOVE:
+
+               /* already depopulated? */
+               if (!of_node_check_flag(rd->dn, OF_POPULATED))
+                       return NOTIFY_OK;
+
                /* find our device by node */
                pdev = of_find_device_by_node(rd->dn);
                if (pdev == NULL)
index 75976da22b2e2c997fe70d7ea1b6c9a1c622face..a2b687d5f324700a0adff54e991905053445a167 100644 (file)
                        };
                };
 
+               overlay10 {
+                       fragment@0 {
+                               target-path = "/testcase-data/overlay-node/test-bus";
+                               __overlay__ {
+
+                                       /* suppress DTC warning */
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+
+                                       test-selftest10 {
+                                               compatible = "selftest";
+                                               status = "okay";
+                                               reg = <10>;
+
+                                               #address-cells = <1>;
+                                               #size-cells = <0>;
+
+                                               test-selftest101 {
+                                                       compatible = "selftest";
+                                                       status = "okay";
+                                                       reg = <1>;
+                                               };
+
+                                       };
+                               };
+                       };
+               };
+
+               overlay11 {
+                       fragment@0 {
+                               target-path = "/testcase-data/overlay-node/test-bus";
+                               __overlay__ {
+
+                                       /* suppress DTC warning */
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+
+                                       test-selftest11 {
+                                               compatible = "selftest";
+                                               status = "okay";
+                                               reg = <11>;
+
+                                               #address-cells = <1>;
+                                               #size-cells = <0>;
+
+                                               test-selftest111 {
+                                                       compatible = "selftest";
+                                                       status = "okay";
+                                                       reg = <1>;
+                                               };
+
+                                       };
+                               };
+                       };
+               };
        };
 };
index 844838e11ef1a1f8f4d6ac9b52721a1b668dc6aa..41a4a138f53b26c547296805eaf425ebb359268f 100644 (file)
@@ -978,6 +978,9 @@ static int selftest_probe(struct platform_device *pdev)
        }
 
        dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+
+       of_platform_populate(np, NULL, NULL, &pdev->dev);
+
        return 0;
 }
 
@@ -1385,6 +1388,39 @@ static void of_selftest_overlay_8(void)
        selftest(1, "overlay test %d passed\n", 8);
 }
 
+/* test insertion of a bus with parent devices */
+static void of_selftest_overlay_10(void)
+{
+       int ret;
+       char *child_path;
+
+       /* device should disable */
+       ret = of_selftest_apply_overlay_check(10, 10, 0, 1);
+       if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 10))
+               return;
+
+       child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101",
+                       selftest_path(10));
+       if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10))
+               return;
+
+       ret = of_path_platform_device_exists(child_path);
+       kfree(child_path);
+       if (selftest(ret, "overlay test %d failed; no child device\n", 10))
+               return;
+}
+
+/* test insertion of a bus with parent devices (and revert) */
+static void of_selftest_overlay_11(void)
+{
+       int ret;
+
+       /* device should disable */
+       ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1);
+       if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 11))
+               return;
+}
+
 static void __init of_selftest_overlay(void)
 {
        struct device_node *bus_np = NULL;
@@ -1433,6 +1469,9 @@ static void __init of_selftest_overlay(void)
        of_selftest_overlay_6();
        of_selftest_overlay_8();
 
+       of_selftest_overlay_10();
+       of_selftest_overlay_11();
+
 out:
        of_node_put(bus_np);
 }
index 37e71ff6408dca41ff5790417f549eead9ddea97..dceb9ddfd99af6d754b0190fe2eb99431f64f4a6 100644 (file)
@@ -694,9 +694,8 @@ lba_fixup_bus(struct pci_bus *bus)
                int i;
                /* PCI-PCI Bridge */
                pci_read_bridge_bases(bus);
-               for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
-                       pci_claim_resource(bus->self, i);
-               }
+               for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
+                       pci_claim_bridge_resource(bus->self, i);
        } else {
                /* Host-PCI Bridge */
                int err;
index 73aef51a28f0760fefa6b4344235e7f341bebb3d..8fb16188cd82aaff9d346a70f46e0257e468fe29 100644 (file)
@@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
 }
 EXPORT_SYMBOL(pci_bus_alloc_resource);
 
+/*
+ * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
+ * resource fits inside a window of an upstream bridge, do nothing.  If it
+ * overlaps an upstream window but extends outside it, clip the resource so
+ * it fits completely inside.
+ */
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
+{
+       struct pci_bus *bus = dev->bus;
+       struct resource *res = &dev->resource[idx];
+       struct resource orig_res = *res;
+       struct resource *r;
+       int i;
+
+       pci_bus_for_each_resource(bus, r, i) {
+               resource_size_t start, end;
+
+               if (!r)
+                       continue;
+
+               if (resource_type(res) != resource_type(r))
+                       continue;
+
+               start = max(r->start, res->start);
+               end = min(r->end, res->end);
+
+               if (start > end)
+                       continue;       /* no overlap */
+
+               if (res->start == start && res->end == end)
+                       return false;   /* no change */
+
+               res->start = start;
+               res->end = end;
+               dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+                                &orig_res, res);
+
+               return true;
+       }
+
+       return false;
+}
+
 void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
 
 /**
index df781cdf13c1871e265eb933e8baadde09e2dd0a..17ca98657a2866820233d2760f339d26a2278cf7 100644 (file)
@@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
        struct msi_msg msg;
        struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
 
+       if (desc->msi_attrib.is_msix)
+               return -EINVAL;
+
        irq = assign_irq(1, desc, &pos);
        if (irq < 0)
                return irq;
index cab05f31223f08166cc0a8e179759fc99812a286..e9d4fd861ba1c84a82f9561841d4e1da26715a2a 100644 (file)
@@ -3271,7 +3271,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
 {
        struct pci_dev *pdev;
 
-       if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
+       if (pci_is_root_bus(dev->bus) || dev->subordinate ||
+           !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
                return -ENOTTY;
 
        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3305,7 +3306,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
 {
        struct pci_dev *pdev;
 
-       if (dev->subordinate || !dev->slot)
+       if (dev->subordinate || !dev->slot ||
+           dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
                return -ENOTTY;
 
        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3557,6 +3559,20 @@ int pci_try_reset_function(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_try_reset_function);
 
+/* Do any devices on or below this bus prevent a bus reset? */
+static bool pci_bus_resetable(struct pci_bus *bus)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+                   (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+                       return false;
+       }
+
+       return true;
+}
+
 /* Lock devices from the top of the tree down */
 static void pci_bus_lock(struct pci_bus *bus)
 {
@@ -3607,6 +3623,22 @@ unlock:
        return 0;
 }
 
+/* Do any devices on or below this slot prevent a bus reset? */
+static bool pci_slot_resetable(struct pci_slot *slot)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+               if (!dev->slot || dev->slot != slot)
+                       continue;
+               if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+                   (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+                       return false;
+       }
+
+       return true;
+}
+
 /* Lock devices from the top of the tree down */
 static void pci_slot_lock(struct pci_slot *slot)
 {
@@ -3728,7 +3760,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
 {
        int rc;
 
-       if (!slot)
+       if (!slot || !pci_slot_resetable(slot))
                return -ENOTTY;
 
        if (!probe)
@@ -3820,7 +3852,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
 
 static int pci_bus_reset(struct pci_bus *bus, int probe)
 {
-       if (!bus->self)
+       if (!bus->self || !pci_bus_resetable(bus))
                return -ENOTTY;
 
        if (probe)
index 8aff29a804ffa6e9ddaa30e2277d5d654f1df508..d54632a1db43cbed30d89fb91b84fef489cbc4a4 100644 (file)
@@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus,
 void __pci_bus_assign_resources(const struct pci_bus *bus,
                                struct list_head *realloc_head,
                                struct list_head *fail_head);
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
 
 /**
  * pci_ari_enabled - query ARI forwarding status
index ed6f89b6efe5c2cab127dd15613cab59be736fd7..903d5078b5ede8872fc71ec662ecadf230e13950 100644 (file)
@@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,     PCI_DEVICE_ID_S3_868,           quirk_s3_64M);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,     PCI_DEVICE_ID_S3_968,           quirk_s3_64M);
 
+static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
+                    const char *name)
+{
+       u32 region;
+       struct pci_bus_region bus_region;
+       struct resource *res = dev->resource + pos;
+
+       pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
+
+       if (!region)
+               return;
+
+       res->name = pci_name(dev);
+       res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
+       res->flags |=
+               (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
+       region &= ~(size - 1);
+
+       /* Convert from PCI bus to resource space */
+       bus_region.start = region;
+       bus_region.end = region + size - 1;
+       pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+       dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
+                name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+}
+
 /*
  * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
  * ver. 1.33  20070103) don't set the correct ISA PCI region header info.
  * BAR0 should be 8 bytes; instead, it may be set to something like 8k
  * (which conflicts w/ BAR1's memory range).
+ *
+ * CS553x's ISA PCI BARs may also be read-only (ref:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
  */
 static void quirk_cs5536_vsa(struct pci_dev *dev)
 {
+       static char *name = "CS5536 ISA bridge";
+
        if (pci_resource_len(dev, 0) != 8) {
-               struct resource *res = &dev->resource[0];
-               res->end = res->start + 8 - 1;
-               dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n");
+               quirk_io(dev, 0,   8, name);    /* SMB */
+               quirk_io(dev, 1, 256, name);    /* GPIO */
+               quirk_io(dev, 2,  64, name);    /* MFGPT */
+               dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
+                        name);
        }
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
@@ -3028,6 +3062,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
                         quirk_broken_intx_masking);
 
+static void quirk_no_bus_reset(struct pci_dev *dev)
+{
+       dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
+}
+
+/*
+ * Atheros AR93xx chips do not behave after a bus reset.  The device will
+ * throw a Link Down error on AER-capable systems and regardless of AER,
+ * config space of the device is never accessible again and typically
+ * causes the system to hang or reset when access is attempted.
+ * http://www.spinics.net/lists/linux-pci/msg34797.html
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+
 #ifdef CONFIG_ACPI
 /*
  * Apple: Shutdown Cactus Ridge Thunderbolt controller.
index 0482235eee9262f79ecd69f907c8c41c2364d914..e3e17f3c0f0f2929da94d7411c0740a391dc56c6 100644 (file)
@@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus);
    config space writes, so it's quite possible that an I/O window of
    the bridge will have some undesirable address (e.g. 0) after the
    first write. Ditto 64-bit prefetchable MMIO.  */
-static void pci_setup_bridge_io(struct pci_bus *bus)
+static void pci_setup_bridge_io(struct pci_dev *bridge)
 {
-       struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        unsigned long io_mask;
@@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
                io_mask = PCI_IO_1K_RANGE_MASK;
 
        /* Set up the top and bottom of the PCI I/O segment for this bus. */
-       res = bus->resource[0];
+       res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
        pcibios_resource_to_bus(bridge->bus, &region, res);
        if (res->flags & IORESOURCE_IO) {
                pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
        pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
 }
 
-static void pci_setup_bridge_mmio(struct pci_bus *bus)
+static void pci_setup_bridge_mmio(struct pci_dev *bridge)
 {
-       struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        u32 l;
 
        /* Set up the top and bottom of the PCI Memory segment for this bus. */
-       res = bus->resource[1];
+       res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
        pcibios_resource_to_bus(bridge->bus, &region, res);
        if (res->flags & IORESOURCE_MEM) {
                l = (region.start >> 16) & 0xfff0;
@@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
        pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
 }
 
-static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
+static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
 {
-       struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        u32 l, bu, lu;
@@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
 
        /* Set up PREF base/limit. */
        bu = lu = 0;
-       res = bus->resource[2];
+       res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
        pcibios_resource_to_bus(bridge->bus, &region, res);
        if (res->flags & IORESOURCE_PREFETCH) {
                l = (region.start >> 16) & 0xfff0;
@@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
                 &bus->busn_res);
 
        if (type & IORESOURCE_IO)
-               pci_setup_bridge_io(bus);
+               pci_setup_bridge_io(bridge);
 
        if (type & IORESOURCE_MEM)
-               pci_setup_bridge_mmio(bus);
+               pci_setup_bridge_mmio(bridge);
 
        if (type & IORESOURCE_PREFETCH)
-               pci_setup_bridge_mmio_pref(bus);
+               pci_setup_bridge_mmio_pref(bridge);
 
        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
 }
@@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus)
        __pci_setup_bridge(bus, type);
 }
 
+
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
+{
+       if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
+               return 0;
+
+       if (pci_claim_resource(bridge, i) == 0)
+               return 0;       /* claimed the window */
+
+       if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+               return 0;
+
+       if (!pci_bus_clip_resource(bridge, i))
+               return -EINVAL; /* clipping didn't change anything */
+
+       switch (i - PCI_BRIDGE_RESOURCES) {
+       case 0:
+               pci_setup_bridge_io(bridge);
+               break;
+       case 1:
+               pci_setup_bridge_mmio(bridge);
+               break;
+       case 2:
+               pci_setup_bridge_mmio_pref(bridge);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (pci_claim_resource(bridge, i) == 0)
+               return 0;       /* claimed a smaller window */
+
+       return -EINVAL;
+}
+
 /* Check whether the bridge supports optional I/O and
    prefetchable memory ranges. If not, the respective
    base/limit registers must be read-only and read as 0. */
index e34da13885e8c422a7fdecc153d6407c0b52999f..27fa62ce613618debb31fadb6bb50a51d9e65893 100644 (file)
@@ -1050,7 +1050,8 @@ static int miphy28lp_init(struct phy *phy)
                ret = miphy28lp_init_usb3(miphy_phy);
                break;
        default:
-               return -EINVAL;
+               ret = -EINVAL;
+               break;
        }
 
        mutex_unlock(&miphy_dev->miphy_mutex);
index 6ab43a814ad2d9a1d61a745e982cb1f0ed3e87bb..6c80154e8bffb23253e03d546edd68a11ad3e0b0 100644 (file)
@@ -141,7 +141,7 @@ struct miphy365x_phy {
        bool pcie_tx_pol_inv;
        bool sata_tx_pol_inv;
        u32 sata_gen;
-       u64 ctrlreg;
+       u32 ctrlreg;
        u8 type;
 };
 
@@ -179,7 +179,7 @@ static int miphy365x_set_path(struct miphy365x_phy *miphy_phy,
        bool sata = (miphy_phy->type == MIPHY_TYPE_SATA);
 
        return regmap_update_bits(miphy_dev->regmap,
-                                 (unsigned int)miphy_phy->ctrlreg,
+                                 miphy_phy->ctrlreg,
                                  SYSCFG_SELECT_SATA_MASK,
                                  sata << SYSCFG_SELECT_SATA_POS);
 }
@@ -445,7 +445,6 @@ int miphy365x_get_addr(struct device *dev, struct miphy365x_phy *miphy_phy,
 {
        struct device_node *phynode = miphy_phy->phy->dev.of_node;
        const char *name;
-       const __be32 *taddr;
        int type = miphy_phy->type;
        int ret;
 
@@ -455,22 +454,6 @@ int miphy365x_get_addr(struct device *dev, struct miphy365x_phy *miphy_phy,
                return ret;
        }
 
-       if (!strncmp(name, "syscfg", 6)) {
-               taddr = of_get_address(phynode, index, NULL, NULL);
-               if (!taddr) {
-                       dev_err(dev, "failed to fetch syscfg address\n");
-                       return -EINVAL;
-               }
-
-               miphy_phy->ctrlreg = of_translate_address(phynode, taddr);
-               if (miphy_phy->ctrlreg == OF_BAD_ADDR) {
-                       dev_err(dev, "failed to translate syscfg address\n");
-                       return -EINVAL;
-               }
-
-               return 0;
-       }
-
        if (!((!strncmp(name, "sata", 4) && type == MIPHY_TYPE_SATA) ||
              (!strncmp(name, "pcie", 4) && type == MIPHY_TYPE_PCIE)))
                return 0;
@@ -606,7 +589,15 @@ static int miphy365x_probe(struct platform_device *pdev)
                        return ret;
 
                phy_set_drvdata(phy, miphy_dev->phys[port]);
+
                port++;
+               /* sysconfig offsets are indexed from 1 */
+               ret = of_property_read_u32_index(np, "st,syscfg", port,
+                                       &miphy_phy->ctrlreg);
+               if (ret) {
+                       dev_err(&pdev->dev, "No sysconfig offset found\n");
+                       return ret;
+               }
        }
 
        provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
index c96e8183a8ffe061e017976daa40632ac5a22c56..efe724f97e02fbf9eba84c215628993fa3ea1274 100644 (file)
 /**
  * omap_control_pcie_pcs - set the PCS delay count
  * @dev: the control module device
- * @id: index of the pcie PHY (should be 1 or 2)
  * @delay: 8 bit delay value
  */
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+void omap_control_pcie_pcs(struct device *dev, u8 delay)
 {
        u32 val;
        struct omap_control_phy *control_phy;
@@ -55,8 +54,8 @@ void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
 
        val = readl(control_phy->pcie_pcs);
        val &= ~(OMAP_CTRL_PCIE_PCS_MASK <<
-               (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT));
-       val |= delay << (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+               OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+       val |= (delay << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
        writel(val, control_phy->pcie_pcs);
 }
 EXPORT_SYMBOL_GPL(omap_control_pcie_pcs);
index 74f0fab3cd8a9df0e29dacc08e04338fbb7762e1..1d5ae5f8ef694cf9ca389dd3a0742bec189a6955 100644 (file)
@@ -22,6 +22,9 @@
 #include <linux/mfd/syscon.h>
 #include <linux/phy/phy.h>
 
+#define PHYPARAM_REG   1
+#define PHYCTRL_REG    2
+
 /* Default PHY_SEL and REFCLKSEL configuration */
 #define STIH407_USB_PICOPHY_CTRL_PORT_CONF     0x6
 #define STIH407_USB_PICOPHY_CTRL_PORT_MASK     0x1f
@@ -93,7 +96,7 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
        struct device_node *np = dev->of_node;
        struct phy_provider *phy_provider;
        struct phy *phy;
-       struct resource *res;
+       int ret;
 
        phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL);
        if (!phy_dev)
@@ -123,19 +126,19 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
                return PTR_ERR(phy_dev->regmap);
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
-       if (!res) {
-               dev_err(dev, "No ctrl reg found\n");
-               return -ENXIO;
+       ret = of_property_read_u32_index(np, "st,syscfg", PHYPARAM_REG,
+                                       &phy_dev->param);
+       if (ret) {
+               dev_err(dev, "can't get phyparam offset (%d)\n", ret);
+               return ret;
        }
-       phy_dev->ctrl = res->start;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "param");
-       if (!res) {
-               dev_err(dev, "No param reg found\n");
-               return -ENXIO;
+       ret = of_property_read_u32_index(np, "st,syscfg", PHYCTRL_REG,
+                                       &phy_dev->ctrl);
+       if (ret) {
+               dev_err(dev, "can't get phyctrl offset (%d)\n", ret);
+               return ret;
        }
-       phy_dev->param = res->start;
 
        phy = devm_phy_create(dev, NULL, &stih407_usb2_picophy_data);
        if (IS_ERR(phy)) {
index fb02a67c91811e5291757c8fbb330439c3d95e89..a2b08f3ccb031cbf3a484cb0afab5665c0fb3ad7 100644 (file)
@@ -244,7 +244,8 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
        else
                data->num_phys = 3;
 
-       if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy"))
+       if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") ||
+           of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
                data->disc_thresh = 3;
        else
                data->disc_thresh = 2;
index 1387b4d4afe376556661f49938bf71bb85572d94..465de2c800f228d7fa4961083e243349f5fbcd33 100644 (file)
@@ -82,7 +82,6 @@ struct ti_pipe3 {
        struct clk              *refclk;
        struct clk              *div_clk;
        struct pipe3_dpll_map   *dpll_map;
-       u8                      id;
 };
 
 static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -217,8 +216,13 @@ static int ti_pipe3_init(struct phy *x)
        u32 val;
        int ret = 0;
 
+       /*
+        * Set pcie_pcs register to 0x96 for proper functioning of phy
+        * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
+        * 18-1804.
+        */
        if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
-               omap_control_pcie_pcs(phy->control_dev, phy->id, 0xF1);
+               omap_control_pcie_pcs(phy->control_dev, 0x96);
                return 0;
        }
 
@@ -347,8 +351,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
        }
 
        if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
-               if (of_property_read_u8(node, "id", &phy->id) < 0)
-                       phy->id = 1;
 
                clk = devm_clk_get(phy->dev, "dpll_ref");
                if (IS_ERR(clk)) {
index e4f65510c87e8928666e195b6b76e52cae9252b5..89dca77ca0382e93909188cad63ccd9ef6bff41b 100644 (file)
@@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
        if (pctldev == NULL)
                return;
 
-       mutex_lock(&pinctrldev_list_mutex);
        mutex_lock(&pctldev->mutex);
-
        pinctrl_remove_device_debugfs(pctldev);
+       mutex_unlock(&pctldev->mutex);
 
        if (!IS_ERR(pctldev->p))
                pinctrl_put(pctldev->p);
 
+       mutex_lock(&pinctrldev_list_mutex);
+       mutex_lock(&pctldev->mutex);
        /* TODO: check that no pinmuxes are still active? */
        list_del(&pctldev->node);
        /* Destroy descriptor tree */
index dfd021e8268f40e8f8900bdb5e69f79c389accd6..f4cd0b9b2438b3548fafa4746b6c6a6cf8874bb4 100644 (file)
@@ -177,7 +177,7 @@ struct at91_pinctrl {
        struct device           *dev;
        struct pinctrl_dev      *pctl;
 
-       int                     nbanks;
+       int                     nactive_banks;
 
        uint32_t                *mux_mask;
        int                     nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
        int mux;
 
        /* check if it's a valid config */
-       if (pin->bank >= info->nbanks) {
+       if (pin->bank >= gpio_banks) {
                dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
-                       name, index, pin->bank, info->nbanks);
+                       name, index, pin->bank, gpio_banks);
                return -EINVAL;
        }
 
+       if (!gpio_chips[pin->bank]) {
+               dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
+                       name, index, pin->bank);
+               return -ENXIO;
+       }
+
        if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
                dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
                        name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
 
        for_each_child_of_node(np, child) {
                if (of_device_is_compatible(child, gpio_compat)) {
-                       info->nbanks++;
+                       if (of_device_is_available(child))
+                               info->nactive_banks++;
                } else {
                        info->nfunctions++;
                        info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
        }
 
        size /= sizeof(*list);
-       if (!size || size % info->nbanks) {
-               dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
+       if (!size || size % gpio_banks) {
+               dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
                return -EINVAL;
        }
-       info->nmux = size / info->nbanks;
+       info->nmux = size / gpio_banks;
 
        info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
        if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
                of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
        at91_pinctrl_child_count(info, np);
 
-       if (info->nbanks < 1) {
+       if (gpio_banks < 1) {
                dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
                return -EINVAL;
        }
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
 
        dev_dbg(&pdev->dev, "mux-mask\n");
        tmp = info->mux_mask;
-       for (i = 0; i < info->nbanks; i++) {
+       for (i = 0; i < gpio_banks; i++) {
                for (j = 0; j < info->nmux; j++, tmp++) {
                        dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
                }
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
        if (!info->groups)
                return -ENOMEM;
 
-       dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+       dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
        dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
        dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
 
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
 {
        struct at91_pinctrl *info;
        struct pinctrl_pin_desc *pdesc;
-       int ret, i, j, k;
+       int ret, i, j, k, ngpio_chips_enabled = 0;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
         * to obtain references to the struct gpio_chip * for them, and we
         * need this to proceed.
         */
-       for (i = 0; i < info->nbanks; i++) {
-               if (!gpio_chips[i]) {
-                       dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
-                       devm_kfree(&pdev->dev, info);
-                       return -EPROBE_DEFER;
-               }
+       for (i = 0; i < gpio_banks; i++)
+               if (gpio_chips[i])
+                       ngpio_chips_enabled++;
+
+       if (ngpio_chips_enabled < info->nactive_banks) {
+               dev_warn(&pdev->dev,
+                        "All GPIO chips are not registered yet (%d/%d)\n",
+                        ngpio_chips_enabled, info->nactive_banks);
+               devm_kfree(&pdev->dev, info);
+               return -EPROBE_DEFER;
        }
 
        at91_pinctrl_desc.name = dev_name(&pdev->dev);
-       at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+       at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
        at91_pinctrl_desc.pins = pdesc =
                devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
 
        if (!at91_pinctrl_desc.pins)
                return -ENOMEM;
 
-       for (i = 0 , k = 0; i < info->nbanks; i++) {
+       for (i = 0, k = 0; i < gpio_banks; i++) {
                for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
                        pdesc->number = k;
                        pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
        }
 
        /* We will handle a range of GPIO pins */
-       for (i = 0; i < info->nbanks; i++)
-               pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+       for (i = 0; i < gpio_banks; i++)
+               if (gpio_chips[i])
+                       pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
 
        dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
 
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 static int at91_gpio_of_irq_setup(struct platform_device *pdev,
                                  struct at91_gpio_chip *at91_gpio)
 {
+       struct gpio_chip        *gpiochip_prev = NULL;
        struct at91_gpio_chip   *prev = NULL;
        struct irq_data         *d = irq_get_irq_data(at91_gpio->pioc_virq);
-       int ret;
+       int ret, i;
 
        at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
 
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
                return ret;
        }
 
-       /* Setup chained handler */
-       if (at91_gpio->pioc_idx)
-               prev = gpio_chips[at91_gpio->pioc_idx - 1];
-
        /* The top level handler handles one bank of GPIOs, except
         * on some SoC it can handle up to three...
         * We only set up the handler for the first of the list.
         */
-       if (prev && prev->next == at91_gpio)
+       gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
+       if (!gpiochip_prev) {
+               /* Then register the chain on the parent IRQ */
+               gpiochip_set_chained_irqchip(&at91_gpio->chip,
+                                            &gpio_irqchip,
+                                            at91_gpio->pioc_virq,
+                                            gpio_irq_handler);
                return 0;
+       }
 
-       /* Then register the chain on the parent IRQ */
-       gpiochip_set_chained_irqchip(&at91_gpio->chip,
-                                    &gpio_irqchip,
-                                    at91_gpio->pioc_virq,
-                                    gpio_irq_handler);
+       prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
 
-       return 0;
+       /* we can only have 2 banks before */
+       for (i = 0; i < 2; i++) {
+               if (prev->next) {
+                       prev = prev->next;
+               } else {
+                       prev->next = at91_gpio;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
 }
 
 /* This structure is replicated for each GPIO block allocated at probe time */
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
        .ngpio                  = MAX_NB_GPIO_PER_BANK,
 };
 
-static void at91_gpio_probe_fixup(void)
-{
-       unsigned i;
-       struct at91_gpio_chip *at91_gpio, *last = NULL;
-
-       for (i = 0; i < gpio_banks; i++) {
-               at91_gpio = gpio_chips[i];
-
-               /*
-                * GPIO controller are grouped on some SoC:
-                * PIOC, PIOD and PIOE can share the same IRQ line
-                */
-               if (last && last->pioc_virq == at91_gpio->pioc_virq)
-                       last->next = at91_gpio;
-               last = at91_gpio;
-       }
-}
-
 static struct of_device_id at91_gpio_of_match[] = {
        { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
        { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
        gpio_chips[alias_idx] = at91_chip;
        gpio_banks = max(gpio_banks, alias_idx + 1);
 
-       at91_gpio_probe_fixup();
-
        ret = at91_gpio_of_irq_setup(pdev, at91_chip);
        if (ret)
                goto irq_setup_err;
index ba74f0aa60c76ac76d90d2edaff8c219d280e6a3..43eacc924b7eb96c0cb0b4fd02d2f91f3c9a2fde 100644 (file)
@@ -89,6 +89,7 @@ struct rockchip_iomux {
  * @reg_pull: optional separate register for additional pull settings
  * @clk: clock of the gpio bank
  * @irq: interrupt of the gpio bank
+ * @saved_enables: Saved content of GPIO_INTEN at suspend time.
  * @pin_base: first pin number
  * @nr_pins: number of pins in this bank
  * @name: name of the bank
@@ -107,6 +108,7 @@ struct rockchip_pin_bank {
        struct regmap                   *regmap_pull;
        struct clk                      *clk;
        int                             irq;
+       u32                             saved_enables;
        u32                             pin_base;
        u8                              nr_pins;
        char                            *name;
@@ -1396,10 +1398,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_get_chip(irq);
        struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
-       u32 polarity = 0, data = 0;
        u32 pend;
-       bool edge_changed = false;
-       unsigned long flags;
 
        dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
 
@@ -1407,12 +1406,6 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
 
        pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
 
-       if (bank->toggle_edge_mode) {
-               polarity = readl_relaxed(bank->reg_base +
-                                        GPIO_INT_POLARITY);
-               data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
-       }
-
        while (pend) {
                unsigned int virq;
 
@@ -1432,27 +1425,31 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
                 * needs manual intervention.
                 */
                if (bank->toggle_edge_mode & BIT(irq)) {
-                       if (data & BIT(irq))
-                               polarity &= ~BIT(irq);
-                       else
-                               polarity |= BIT(irq);
+                       u32 data, data_old, polarity;
+                       unsigned long flags;
 
-                       edge_changed = true;
-               }
+                       data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
+                       do {
+                               spin_lock_irqsave(&bank->slock, flags);
 
-               generic_handle_irq(virq);
-       }
+                               polarity = readl_relaxed(bank->reg_base +
+                                                        GPIO_INT_POLARITY);
+                               if (data & BIT(irq))
+                                       polarity &= ~BIT(irq);
+                               else
+                                       polarity |= BIT(irq);
+                               writel(polarity,
+                                      bank->reg_base + GPIO_INT_POLARITY);
 
-       if (bank->toggle_edge_mode && edge_changed) {
-               /* Interrupt params should only be set with ints disabled */
-               spin_lock_irqsave(&bank->slock, flags);
+                               spin_unlock_irqrestore(&bank->slock, flags);
 
-               data = readl_relaxed(bank->reg_base + GPIO_INTEN);
-               writel_relaxed(0, bank->reg_base + GPIO_INTEN);
-               writel(polarity, bank->reg_base + GPIO_INT_POLARITY);
-               writel(data, bank->reg_base + GPIO_INTEN);
+                               data_old = data;
+                               data = readl_relaxed(bank->reg_base +
+                                                    GPIO_EXT_PORT);
+                       } while ((data & BIT(irq)) != (data_old & BIT(irq)));
+               }
 
-               spin_unlock_irqrestore(&bank->slock, flags);
+               generic_handle_irq(virq);
        }
 
        chained_irq_exit(chip, desc);
@@ -1543,6 +1540,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
        return 0;
 }
 
+static void rockchip_irq_suspend(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
+       irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
+}
+
+static void rockchip_irq_resume(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
+}
+
+static void rockchip_irq_disable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       u32 val;
+
+       irq_gc_lock(gc);
+
+       val = irq_reg_readl(gc, GPIO_INTEN);
+       val &= ~d->mask;
+       irq_reg_writel(gc, val, GPIO_INTEN);
+
+       irq_gc_unlock(gc);
+}
+
+static void rockchip_irq_enable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       u32 val;
+
+       irq_gc_lock(gc);
+
+       val = irq_reg_readl(gc, GPIO_INTEN);
+       val |= d->mask;
+       irq_reg_writel(gc, val, GPIO_INTEN);
+
+       irq_gc_unlock(gc);
+}
+
 static int rockchip_interrupts_register(struct platform_device *pdev,
                                                struct rockchip_pinctrl *info)
 {
@@ -1581,12 +1623,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
                gc = irq_get_domain_generic_chip(bank->domain, 0);
                gc->reg_base = bank->reg_base;
                gc->private = bank;
-               gc->chip_types[0].regs.mask = GPIO_INTEN;
+               gc->chip_types[0].regs.mask = GPIO_INTMASK;
                gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
                gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
-               gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
-               gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+               gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+               gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+               gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+               gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
                gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+               gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+               gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
                gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
                gc->wake_enabled = IRQ_MSK(bank->nr_pins);
 
index 7c9d51382248d9c5b3069653d72f26e20751c214..9e5ec00084bb1dcc2aad395af7c37d2bf0e3044a 100644 (file)
@@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
                                   struct seq_file *s, unsigned pin_id)
 {
        unsigned long config;
-       st_pinconf_get(pctldev, pin_id, &config);
 
+       mutex_unlock(&pctldev->mutex);
+       st_pinconf_get(pctldev, pin_id, &config);
+       mutex_lock(&pctldev->mutex);
        seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n"
                "\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
                "de:%ld,rt-clk:%ld,rt-delay:%ld]",
@@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = {
 
 static struct irq_chip st_gpio_irqchip = {
        .name           = "GPIO",
+       .irq_disable    = st_gpio_irq_mask,
        .irq_mask       = st_gpio_irq_mask,
        .irq_unmask     = st_gpio_irq_unmask,
        .irq_set_type   = st_gpio_irq_set_type,
index c5cef59f59654cad158677f8dbc664051a222508..779950c62e53d238e7a4c35d7290ca66475fed86 100644 (file)
@@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
 
        /* load the gpio chip */
        xway_chip.dev = &pdev->dev;
-       of_gpiochip_add(&xway_chip);
        ret = gpiochip_add(&xway_chip);
        if (ret) {
-               of_gpiochip_remove(&xway_chip);
                dev_err(&pdev->dev, "Failed to register gpio chip\n");
                return ret;
        }
index e730935fa4577deb2582ea493ab60aa2b0f176fe..ed7017df065d3f002bada2659f4be9baccf6c1f2 100644 (file)
@@ -865,10 +865,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
 
 static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
 {
-       int i = 0;
+       int i;
        const struct msm_function *func = pctrl->soc->functions;
 
-       for (; i <= pctrl->soc->nfunctions; i++)
+       for (i = 0; i < pctrl->soc->nfunctions; i++)
                if (!strcmp(func[i].name, "ps_hold")) {
                        pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
                        pctrl->restart_nb.priority = 128;
index 9411eae39a4ec5fc32f993d1583a3c329e2365b1..3d21efe11d7b77c511ff651201ca9a14c467e058 100644 (file)
@@ -2,11 +2,9 @@
  *  Driver for Dell laptop extras
  *
  *  Copyright (c) Red Hat <mjg@redhat.com>
- *  Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- *  Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
  *
- *  Based on documentation in the libsmbios package:
- *  Copyright (C) 2005-2014 Dell Inc.
+ *  Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
+ *  Inc.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License version 2 as
 #include "../../firmware/dcdbas.h"
 
 #define BRIGHTNESS_TOKEN 0x7d
-#define KBD_LED_OFF_TOKEN 0x01E1
-#define KBD_LED_ON_TOKEN 0x01E2
-#define KBD_LED_AUTO_TOKEN 0x01E3
-#define KBD_LED_AUTO_25_TOKEN 0x02EA
-#define KBD_LED_AUTO_50_TOKEN 0x02EB
-#define KBD_LED_AUTO_75_TOKEN 0x02EC
-#define KBD_LED_AUTO_100_TOKEN 0x02F6
 
 /* This structure will be modified by the firmware when we enter
  * system management mode, hence the volatiles */
@@ -71,13 +62,6 @@ struct calling_interface_structure {
 
 struct quirk_entry {
        u8 touchpad_led;
-
-       int needs_kbd_timeouts;
-       /*
-        * Ordered list of timeouts expressed in seconds.
-        * The list must end with -1
-        */
-       int kbd_timeouts[];
 };
 
 static struct quirk_entry *quirks;
@@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
        return 1;
 }
 
-/*
- * These values come from Windows utility provided by Dell. If any other value
- * is used then BIOS silently set timeout to 0 without any error message.
- */
-static struct quirk_entry quirk_dell_xps13_9333 = {
-       .needs_kbd_timeouts = 1,
-       .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
-};
-
 static int da_command_address;
 static int da_command_code;
 static int da_num_tokens;
@@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
-       {
-               .callback = dmi_matched,
-               .ident = "Dell XPS13 9333",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
-               },
-               .driver_data = &quirk_dell_xps13_9333,
-       },
        { }
 };
 
@@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
        }
 }
 
-static int find_token_id(int tokenid)
+static int find_token_location(int tokenid)
 {
        int i;
-
        for (i = 0; i < da_num_tokens; i++) {
                if (da_tokens[i].tokenID == tokenid)
-                       return i;
+                       return da_tokens[i].location;
        }
 
        return -1;
 }
 
-static int find_token_location(int tokenid)
-{
-       int id;
-
-       id = find_token_id(tokenid);
-       if (id == -1)
-               return -1;
-
-       return da_tokens[id].location;
-}
-
 static struct calling_interface_buffer *
 dell_send_request(struct calling_interface_buffer *buffer, int class,
                  int select)
@@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
        return buffer;
 }
 
-static inline int dell_smi_error(int value)
-{
-       switch (value) {
-       case 0: /* Completed successfully */
-               return 0;
-       case -1: /* Completed with error */
-               return -EIO;
-       case -2: /* Function not supported */
-               return -ENXIO;
-       default: /* Unknown error */
-               return -EINVAL;
-       }
-}
-
 /* Derived from information in DellWirelessCtl.cpp:
    Class 17, select 11 is radio control. It returns an array of 32-bit values.
 
@@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd)
        else
                dell_send_request(buffer, 1, 1);
 
- out:
+out:
        release_buffer();
        return ret;
 }
@@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd)
 
        ret = buffer->output[1];
 
- out:
+out:
        release_buffer();
        return ret;
 }
@@ -849,984 +789,6 @@ static void touchpad_led_exit(void)
        led_classdev_unregister(&touchpad_led);
 }
 
-/*
- * Derived from information in smbios-keyboard-ctl:
- *
- * cbClass 4
- * cbSelect 11
- * Keyboard illumination
- * cbArg1 determines the function to be performed
- *
- * cbArg1 0x0 = Get Feature Information
- *  cbRES1         Standard return codes (0, -1, -2)
- *  cbRES2, word0  Bitmap of user-selectable modes
- *     bit 0     Always off (All systems)
- *     bit 1     Always on (Travis ATG, Siberia)
- *     bit 2     Auto: ALS-based On; ALS-based Off (Travis ATG)
- *     bit 3     Auto: ALS- and input-activity-based On; input-activity based Off
- *     bit 4     Auto: Input-activity-based On; input-activity based Off
- *     bit 5     Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- *     bit 6     Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- *     bit 7     Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- *     bit 8     Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- *     bits 9-15 Reserved for future use
- *  cbRES2, byte2  Reserved for future use
- *  cbRES2, byte3  Keyboard illumination type
- *     0         Reserved
- *     1         Tasklight
- *     2         Backlight
- *     3-255     Reserved for future use
- *  cbRES3, byte0  Supported auto keyboard illumination trigger bitmap.
- *     bit 0     Any keystroke
- *     bit 1     Touchpad activity
- *     bit 2     Pointing stick
- *     bit 3     Any mouse
- *     bits 4-7  Reserved for future use
- *  cbRES3, byte1  Supported timeout unit bitmap
- *     bit 0     Seconds
- *     bit 1     Minutes
- *     bit 2     Hours
- *     bit 3     Days
- *     bits 4-7  Reserved for future use
- *  cbRES3, byte2  Number of keyboard light brightness levels
- *  cbRES4, byte0  Maximum acceptable seconds value (0 if seconds not supported).
- *  cbRES4, byte1  Maximum acceptable minutes value (0 if minutes not supported).
- *  cbRES4, byte2  Maximum acceptable hours value (0 if hours not supported).
- *  cbRES4, byte3  Maximum acceptable days value (0 if days not supported)
- *
- * cbArg1 0x1 = Get Current State
- *  cbRES1         Standard return codes (0, -1, -2)
- *  cbRES2, word0  Bitmap of current mode state
- *     bit 0     Always off (All systems)
- *     bit 1     Always on (Travis ATG, Siberia)
- *     bit 2     Auto: ALS-based On; ALS-based Off (Travis ATG)
- *     bit 3     Auto: ALS- and input-activity-based On; input-activity based Off
- *     bit 4     Auto: Input-activity-based On; input-activity based Off
- *     bit 5     Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- *     bit 6     Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- *     bit 7     Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- *     bit 8     Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- *     bits 9-15 Reserved for future use
- *     Note: Only One bit can be set
- *  cbRES2, byte2  Currently active auto keyboard illumination triggers.
- *     bit 0     Any keystroke
- *     bit 1     Touchpad activity
- *     bit 2     Pointing stick
- *     bit 3     Any mouse
- *     bits 4-7  Reserved for future use
- *  cbRES2, byte3  Current Timeout
- *     bits 7:6  Timeout units indicator:
- *     00b       Seconds
- *     01b       Minutes
- *     10b       Hours
- *     11b       Days
- *     bits 5:0  Timeout value (0-63) in sec/min/hr/day
- *     NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
- *     are set upon return from the [Get feature information] call.
- *  cbRES3, byte0  Current setting of ALS value that turns the light on or off.
- *  cbRES3, byte1  Current ALS reading
- *  cbRES3, byte2  Current keyboard light level.
- *
- * cbArg1 0x2 = Set New State
- *  cbRES1         Standard return codes (0, -1, -2)
- *  cbArg2, word0  Bitmap of current mode state
- *     bit 0     Always off (All systems)
- *     bit 1     Always on (Travis ATG, Siberia)
- *     bit 2     Auto: ALS-based On; ALS-based Off (Travis ATG)
- *     bit 3     Auto: ALS- and input-activity-based On; input-activity based Off
- *     bit 4     Auto: Input-activity-based On; input-activity based Off
- *     bit 5     Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- *     bit 6     Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- *     bit 7     Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- *     bit 8     Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- *     bits 9-15 Reserved for future use
- *     Note: Only One bit can be set
- *  cbArg2, byte2  Desired auto keyboard illumination triggers. Must remain inactive to allow
- *                 keyboard to turn off automatically.
- *     bit 0     Any keystroke
- *     bit 1     Touchpad activity
- *     bit 2     Pointing stick
- *     bit 3     Any mouse
- *     bits 4-7  Reserved for future use
- *  cbArg2, byte3  Desired Timeout
- *     bits 7:6  Timeout units indicator:
- *     00b       Seconds
- *     01b       Minutes
- *     10b       Hours
- *     11b       Days
- *     bits 5:0  Timeout value (0-63) in sec/min/hr/day
- *  cbArg3, byte0  Desired setting of ALS value that turns the light on or off.
- *  cbArg3, byte2  Desired keyboard light level.
- */
-
-
-enum kbd_timeout_unit {
-       KBD_TIMEOUT_SECONDS = 0,
-       KBD_TIMEOUT_MINUTES,
-       KBD_TIMEOUT_HOURS,
-       KBD_TIMEOUT_DAYS,
-};
-
-enum kbd_mode_bit {
-       KBD_MODE_BIT_OFF = 0,
-       KBD_MODE_BIT_ON,
-       KBD_MODE_BIT_ALS,
-       KBD_MODE_BIT_TRIGGER_ALS,
-       KBD_MODE_BIT_TRIGGER,
-       KBD_MODE_BIT_TRIGGER_25,
-       KBD_MODE_BIT_TRIGGER_50,
-       KBD_MODE_BIT_TRIGGER_75,
-       KBD_MODE_BIT_TRIGGER_100,
-};
-
-#define kbd_is_als_mode_bit(bit) \
-       ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
-#define kbd_is_trigger_mode_bit(bit) \
-       ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-#define kbd_is_level_mode_bit(bit) \
-       ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-
-struct kbd_info {
-       u16 modes;
-       u8 type;
-       u8 triggers;
-       u8 levels;
-       u8 seconds;
-       u8 minutes;
-       u8 hours;
-       u8 days;
-};
-
-struct kbd_state {
-       u8 mode_bit;
-       u8 triggers;
-       u8 timeout_value;
-       u8 timeout_unit;
-       u8 als_setting;
-       u8 als_value;
-       u8 level;
-};
-
-static const int kbd_tokens[] = {
-       KBD_LED_OFF_TOKEN,
-       KBD_LED_AUTO_25_TOKEN,
-       KBD_LED_AUTO_50_TOKEN,
-       KBD_LED_AUTO_75_TOKEN,
-       KBD_LED_AUTO_100_TOKEN,
-       KBD_LED_ON_TOKEN,
-};
-
-static u16 kbd_token_bits;
-
-static struct kbd_info kbd_info;
-static bool kbd_als_supported;
-static bool kbd_triggers_supported;
-
-static u8 kbd_mode_levels[16];
-static int kbd_mode_levels_count;
-
-static u8 kbd_previous_level;
-static u8 kbd_previous_mode_bit;
-
-static bool kbd_led_present;
-
-/*
- * NOTE: there are three ways to set the keyboard backlight level.
- * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
- * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
- * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
- *
- * There are laptops which support only one of these methods. If we want to
- * support as many machines as possible we need to implement all three methods.
- * The first two methods use the kbd_state structure. The third uses SMBIOS
- * tokens. If kbd_info.levels == 0, the machine does not support setting the
- * keyboard backlight level via kbd_state.level.
- */
-
-static int kbd_get_info(struct kbd_info *info)
-{
-       u8 units;
-       int ret;
-
-       get_buffer();
-
-       buffer->input[0] = 0x0;
-       dell_send_request(buffer, 4, 11);
-       ret = buffer->output[0];
-
-       if (ret) {
-               ret = dell_smi_error(ret);
-               goto out;
-       }
-
-       info->modes = buffer->output[1] & 0xFFFF;
-       info->type = (buffer->output[1] >> 24) & 0xFF;
-       info->triggers = buffer->output[2] & 0xFF;
-       units = (buffer->output[2] >> 8) & 0xFF;
-       info->levels = (buffer->output[2] >> 16) & 0xFF;
-
-       if (units & BIT(0))
-               info->seconds = (buffer->output[3] >> 0) & 0xFF;
-       if (units & BIT(1))
-               info->minutes = (buffer->output[3] >> 8) & 0xFF;
-       if (units & BIT(2))
-               info->hours = (buffer->output[3] >> 16) & 0xFF;
-       if (units & BIT(3))
-               info->days = (buffer->output[3] >> 24) & 0xFF;
-
- out:
-       release_buffer();
-       return ret;
-}
-
-static unsigned int kbd_get_max_level(void)
-{
-       if (kbd_info.levels != 0)
-               return kbd_info.levels;
-       if (kbd_mode_levels_count > 0)
-               return kbd_mode_levels_count - 1;
-       return 0;
-}
-
-static int kbd_get_level(struct kbd_state *state)
-{
-       int i;
-
-       if (kbd_info.levels != 0)
-               return state->level;
-
-       if (kbd_mode_levels_count > 0) {
-               for (i = 0; i < kbd_mode_levels_count; ++i)
-                       if (kbd_mode_levels[i] == state->mode_bit)
-                               return i;
-               return 0;
-       }
-
-       return -EINVAL;
-}
-
-static int kbd_set_level(struct kbd_state *state, u8 level)
-{
-       if (kbd_info.levels != 0) {
-               if (level != 0)
-                       kbd_previous_level = level;
-               if (state->level == level)
-                       return 0;
-               state->level = level;
-               if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
-                       state->mode_bit = kbd_previous_mode_bit;
-               else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
-                       kbd_previous_mode_bit = state->mode_bit;
-                       state->mode_bit = KBD_MODE_BIT_OFF;
-               }
-               return 0;
-       }
-
-       if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
-               if (level != 0)
-                       kbd_previous_level = level;
-               state->mode_bit = kbd_mode_levels[level];
-               return 0;
-       }
-
-       return -EINVAL;
-}
-
-static int kbd_get_state(struct kbd_state *state)
-{
-       int ret;
-
-       get_buffer();
-
-       buffer->input[0] = 0x1;
-       dell_send_request(buffer, 4, 11);
-       ret = buffer->output[0];
-
-       if (ret) {
-               ret = dell_smi_error(ret);
-               goto out;
-       }
-
-       state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
-       if (state->mode_bit != 0)
-               state->mode_bit--;
-
-       state->triggers = (buffer->output[1] >> 16) & 0xFF;
-       state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
-       state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
-       state->als_setting = buffer->output[2] & 0xFF;
-       state->als_value = (buffer->output[2] >> 8) & 0xFF;
-       state->level = (buffer->output[2] >> 16) & 0xFF;
-
- out:
-       release_buffer();
-       return ret;
-}
-
-static int kbd_set_state(struct kbd_state *state)
-{
-       int ret;
-
-       get_buffer();
-       buffer->input[0] = 0x2;
-       buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
-       buffer->input[1] |= (state->triggers & 0xFF) << 16;
-       buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
-       buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
-       buffer->input[2] = state->als_setting & 0xFF;
-       buffer->input[2] |= (state->level & 0xFF) << 16;
-       dell_send_request(buffer, 4, 11);
-       ret = buffer->output[0];
-       release_buffer();
-
-       return dell_smi_error(ret);
-}
-
-static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
-{
-       int ret;
-
-       ret = kbd_set_state(state);
-       if (ret == 0)
-               return 0;
-
-       /*
-        * When setting the new state fails,try to restore the previous one.
-        * This is needed on some machines where BIOS sets a default state when
-        * setting a new state fails. This default state could be all off.
-        */
-
-       if (kbd_set_state(old))
-               pr_err("Setting old previous keyboard state failed\n");
-
-       return ret;
-}
-
-static int kbd_set_token_bit(u8 bit)
-{
-       int id;
-       int ret;
-
-       if (bit >= ARRAY_SIZE(kbd_tokens))
-               return -EINVAL;
-
-       id = find_token_id(kbd_tokens[bit]);
-       if (id == -1)
-               return -EINVAL;
-
-       get_buffer();
-       buffer->input[0] = da_tokens[id].location;
-       buffer->input[1] = da_tokens[id].value;
-       dell_send_request(buffer, 1, 0);
-       ret = buffer->output[0];
-       release_buffer();
-
-       return dell_smi_error(ret);
-}
-
-static int kbd_get_token_bit(u8 bit)
-{
-       int id;
-       int ret;
-       int val;
-
-       if (bit >= ARRAY_SIZE(kbd_tokens))
-               return -EINVAL;
-
-       id = find_token_id(kbd_tokens[bit]);
-       if (id == -1)
-               return -EINVAL;
-
-       get_buffer();
-       buffer->input[0] = da_tokens[id].location;
-       dell_send_request(buffer, 0, 0);
-       ret = buffer->output[0];
-       val = buffer->output[1];
-       release_buffer();
-
-       if (ret)
-               return dell_smi_error(ret);
-
-       return (val == da_tokens[id].value);
-}
-
-static int kbd_get_first_active_token_bit(void)
-{
-       int i;
-       int ret;
-
-       for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
-               ret = kbd_get_token_bit(i);
-               if (ret == 1)
-                       return i;
-       }
-
-       return ret;
-}
-
-static int kbd_get_valid_token_counts(void)
-{
-       return hweight16(kbd_token_bits);
-}
-
-static inline int kbd_init_info(void)
-{
-       struct kbd_state state;
-       int ret;
-       int i;
-
-       ret = kbd_get_info(&kbd_info);
-       if (ret)
-               return ret;
-
-       kbd_get_state(&state);
-
-       /* NOTE: timeout value is stored in 6 bits so max value is 63 */
-       if (kbd_info.seconds > 63)
-               kbd_info.seconds = 63;
-       if (kbd_info.minutes > 63)
-               kbd_info.minutes = 63;
-       if (kbd_info.hours > 63)
-               kbd_info.hours = 63;
-       if (kbd_info.days > 63)
-               kbd_info.days = 63;
-
-       /* NOTE: On tested machines ON mode did not work and caused
-        *       problems (turned backlight off) so do not use it
-        */
-       kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
-
-       kbd_previous_level = kbd_get_level(&state);
-       kbd_previous_mode_bit = state.mode_bit;
-
-       if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
-               kbd_previous_level = 1;
-
-       if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
-               kbd_previous_mode_bit =
-                       ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
-               if (kbd_previous_mode_bit != 0)
-                       kbd_previous_mode_bit--;
-       }
-
-       if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
-                             BIT(KBD_MODE_BIT_TRIGGER_ALS)))
-               kbd_als_supported = true;
-
-       if (kbd_info.modes & (
-           BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
-           BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
-           BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
-          ))
-               kbd_triggers_supported = true;
-
-       /* kbd_mode_levels[0] is reserved, see below */
-       for (i = 0; i < 16; ++i)
-               if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
-                       kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
-
-       /*
-        * Find the first supported mode and assign to kbd_mode_levels[0].
-        * This should be 0 (off), but we cannot depend on the BIOS to
-        * support 0.
-        */
-       if (kbd_mode_levels_count > 0) {
-               for (i = 0; i < 16; ++i) {
-                       if (BIT(i) & kbd_info.modes) {
-                               kbd_mode_levels[0] = i;
-                               break;
-                       }
-               }
-               kbd_mode_levels_count++;
-       }
-
-       return 0;
-
-}
-
-static inline void kbd_init_tokens(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
-               if (find_token_id(kbd_tokens[i]) != -1)
-                       kbd_token_bits |= BIT(i);
-}
-
-static void kbd_init(void)
-{
-       int ret;
-
-       ret = kbd_init_info();
-       kbd_init_tokens();
-
-       if (kbd_token_bits != 0 || ret == 0)
-               kbd_led_present = true;
-}
-
-static ssize_t kbd_led_timeout_store(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       struct kbd_state new_state;
-       struct kbd_state state;
-       bool convert;
-       int value;
-       int ret;
-       char ch;
-       u8 unit;
-       int i;
-
-       ret = sscanf(buf, "%d %c", &value, &ch);
-       if (ret < 1)
-               return -EINVAL;
-       else if (ret == 1)
-               ch = 's';
-
-       if (value < 0)
-               return -EINVAL;
-
-       convert = false;
-
-       switch (ch) {
-       case 's':
-               if (value > kbd_info.seconds)
-                       convert = true;
-               unit = KBD_TIMEOUT_SECONDS;
-               break;
-       case 'm':
-               if (value > kbd_info.minutes)
-                       convert = true;
-               unit = KBD_TIMEOUT_MINUTES;
-               break;
-       case 'h':
-               if (value > kbd_info.hours)
-                       convert = true;
-               unit = KBD_TIMEOUT_HOURS;
-               break;
-       case 'd':
-               if (value > kbd_info.days)
-                       convert = true;
-               unit = KBD_TIMEOUT_DAYS;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (quirks && quirks->needs_kbd_timeouts)
-               convert = true;
-
-       if (convert) {
-               /* Convert value from current units to seconds */
-               switch (unit) {
-               case KBD_TIMEOUT_DAYS:
-                       value *= 24;
-               case KBD_TIMEOUT_HOURS:
-                       value *= 60;
-               case KBD_TIMEOUT_MINUTES:
-                       value *= 60;
-                       unit = KBD_TIMEOUT_SECONDS;
-               }
-
-               if (quirks && quirks->needs_kbd_timeouts) {
-                       for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
-                               if (value <= quirks->kbd_timeouts[i]) {
-                                       value = quirks->kbd_timeouts[i];
-                                       break;
-                               }
-                       }
-               }
-
-               if (value <= kbd_info.seconds && kbd_info.seconds) {
-                       unit = KBD_TIMEOUT_SECONDS;
-               } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
-                       value /= 60;
-                       unit = KBD_TIMEOUT_MINUTES;
-               } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
-                       value /= (60 * 60);
-                       unit = KBD_TIMEOUT_HOURS;
-               } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
-                       value /= (60 * 60 * 24);
-                       unit = KBD_TIMEOUT_DAYS;
-               } else {
-                       return -EINVAL;
-               }
-       }
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       new_state = state;
-       new_state.timeout_value = value;
-       new_state.timeout_unit = unit;
-
-       ret = kbd_set_state_safe(&new_state, &state);
-       if (ret)
-               return ret;
-
-       return count;
-}
-
-static ssize_t kbd_led_timeout_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       struct kbd_state state;
-       int ret;
-       int len;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       len = sprintf(buf, "%d", state.timeout_value);
-
-       switch (state.timeout_unit) {
-       case KBD_TIMEOUT_SECONDS:
-               return len + sprintf(buf+len, "s\n");
-       case KBD_TIMEOUT_MINUTES:
-               return len + sprintf(buf+len, "m\n");
-       case KBD_TIMEOUT_HOURS:
-               return len + sprintf(buf+len, "h\n");
-       case KBD_TIMEOUT_DAYS:
-               return len + sprintf(buf+len, "d\n");
-       default:
-               return -EINVAL;
-       }
-
-       return len;
-}
-
-static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
-                  kbd_led_timeout_show, kbd_led_timeout_store);
-
-static const char * const kbd_led_triggers[] = {
-       "keyboard",
-       "touchpad",
-       /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
-       "mouse",
-};
-
-static ssize_t kbd_led_triggers_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t count)
-{
-       struct kbd_state new_state;
-       struct kbd_state state;
-       bool triggers_enabled = false;
-       bool als_enabled = false;
-       bool disable_als = false;
-       bool enable_als = false;
-       int trigger_bit = -1;
-       char trigger[21];
-       int i, ret;
-
-       ret = sscanf(buf, "%20s", trigger);
-       if (ret != 1)
-               return -EINVAL;
-
-       if (trigger[0] != '+' && trigger[0] != '-')
-               return -EINVAL;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       if (kbd_als_supported)
-               als_enabled = kbd_is_als_mode_bit(state.mode_bit);
-
-       if (kbd_triggers_supported)
-               triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
-
-       if (kbd_als_supported) {
-               if (strcmp(trigger, "+als") == 0) {
-                       if (als_enabled)
-                               return count;
-                       enable_als = true;
-               } else if (strcmp(trigger, "-als") == 0) {
-                       if (!als_enabled)
-                               return count;
-                       disable_als = true;
-               }
-       }
-
-       if (enable_als || disable_als) {
-               new_state = state;
-               if (enable_als) {
-                       if (triggers_enabled)
-                               new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
-                       else
-                               new_state.mode_bit = KBD_MODE_BIT_ALS;
-               } else {
-                       if (triggers_enabled) {
-                               new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
-                               kbd_set_level(&new_state, kbd_previous_level);
-                       } else {
-                               new_state.mode_bit = KBD_MODE_BIT_ON;
-                       }
-               }
-               if (!(kbd_info.modes & BIT(new_state.mode_bit)))
-                       return -EINVAL;
-               ret = kbd_set_state_safe(&new_state, &state);
-               if (ret)
-                       return ret;
-               kbd_previous_mode_bit = new_state.mode_bit;
-               return count;
-       }
-
-       if (kbd_triggers_supported) {
-               for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
-                       if (!(kbd_info.triggers & BIT(i)))
-                               continue;
-                       if (!kbd_led_triggers[i])
-                               continue;
-                       if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
-                               continue;
-                       if (trigger[0] == '+' &&
-                           triggers_enabled && (state.triggers & BIT(i)))
-                               return count;
-                       if (trigger[0] == '-' &&
-                           (!triggers_enabled || !(state.triggers & BIT(i))))
-                               return count;
-                       trigger_bit = i;
-                       break;
-               }
-       }
-
-       if (trigger_bit != -1) {
-               new_state = state;
-               if (trigger[0] == '+')
-                       new_state.triggers |= BIT(trigger_bit);
-               else {
-                       new_state.triggers &= ~BIT(trigger_bit);
-                       /* NOTE: trackstick bit (2) must be disabled when
-                        *       disabling touchpad bit (1), otherwise touchpad
-                        *       bit (1) will not be disabled */
-                       if (trigger_bit == 1)
-                               new_state.triggers &= ~BIT(2);
-               }
-               if ((kbd_info.triggers & new_state.triggers) !=
-                   new_state.triggers)
-                       return -EINVAL;
-               if (new_state.triggers && !triggers_enabled) {
-                       if (als_enabled)
-                               new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
-                       else {
-                               new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
-                               kbd_set_level(&new_state, kbd_previous_level);
-                       }
-               } else if (new_state.triggers == 0) {
-                       if (als_enabled)
-                               new_state.mode_bit = KBD_MODE_BIT_ALS;
-                       else
-                               kbd_set_level(&new_state, 0);
-               }
-               if (!(kbd_info.modes & BIT(new_state.mode_bit)))
-                       return -EINVAL;
-               ret = kbd_set_state_safe(&new_state, &state);
-               if (ret)
-                       return ret;
-               if (new_state.mode_bit != KBD_MODE_BIT_OFF)
-                       kbd_previous_mode_bit = new_state.mode_bit;
-               return count;
-       }
-
-       return -EINVAL;
-}
-
-static ssize_t kbd_led_triggers_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
-{
-       struct kbd_state state;
-       bool triggers_enabled;
-       int level, i, ret;
-       int len = 0;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       len = 0;
-
-       if (kbd_triggers_supported) {
-               triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
-               level = kbd_get_level(&state);
-               for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
-                       if (!(kbd_info.triggers & BIT(i)))
-                               continue;
-                       if (!kbd_led_triggers[i])
-                               continue;
-                       if ((triggers_enabled || level <= 0) &&
-                           (state.triggers & BIT(i)))
-                               buf[len++] = '+';
-                       else
-                               buf[len++] = '-';
-                       len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
-               }
-       }
-
-       if (kbd_als_supported) {
-               if (kbd_is_als_mode_bit(state.mode_bit))
-                       len += sprintf(buf+len, "+als ");
-               else
-                       len += sprintf(buf+len, "-als ");
-       }
-
-       if (len)
-               buf[len - 1] = '\n';
-
-       return len;
-}
-
-static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
-                  kbd_led_triggers_show, kbd_led_triggers_store);
-
-static ssize_t kbd_led_als_store(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct kbd_state state;
-       struct kbd_state new_state;
-       u8 setting;
-       int ret;
-
-       ret = kstrtou8(buf, 10, &setting);
-       if (ret)
-               return ret;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       new_state = state;
-       new_state.als_setting = setting;
-
-       ret = kbd_set_state_safe(&new_state, &state);
-       if (ret)
-               return ret;
-
-       return count;
-}
-
-static ssize_t kbd_led_als_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct kbd_state state;
-       int ret;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       return sprintf(buf, "%d\n", state.als_setting);
-}
-
-static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
-                  kbd_led_als_show, kbd_led_als_store);
-
-static struct attribute *kbd_led_attrs[] = {
-       &dev_attr_stop_timeout.attr,
-       &dev_attr_start_triggers.attr,
-       &dev_attr_als_setting.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(kbd_led);
-
-static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
-{
-       int ret;
-       u16 num;
-       struct kbd_state state;
-
-       if (kbd_get_max_level()) {
-               ret = kbd_get_state(&state);
-               if (ret)
-                       return 0;
-               ret = kbd_get_level(&state);
-               if (ret < 0)
-                       return 0;
-               return ret;
-       }
-
-       if (kbd_get_valid_token_counts()) {
-               ret = kbd_get_first_active_token_bit();
-               if (ret < 0)
-                       return 0;
-               for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
-                       num &= num - 1; /* clear the first bit set */
-               if (num == 0)
-                       return 0;
-               return ffs(num) - 1;
-       }
-
-       pr_warn("Keyboard brightness level control not supported\n");
-       return 0;
-}
-
-static void kbd_led_level_set(struct led_classdev *led_cdev,
-                             enum led_brightness value)
-{
-       struct kbd_state state;
-       struct kbd_state new_state;
-       u16 num;
-
-       if (kbd_get_max_level()) {
-               if (kbd_get_state(&state))
-                       return;
-               new_state = state;
-               if (kbd_set_level(&new_state, value))
-                       return;
-               kbd_set_state_safe(&new_state, &state);
-               return;
-       }
-
-       if (kbd_get_valid_token_counts()) {
-               for (num = kbd_token_bits; num != 0 && value > 0; --value)
-                       num &= num - 1; /* clear the first bit set */
-               if (num == 0)
-                       return;
-               kbd_set_token_bit(ffs(num) - 1);
-               return;
-       }
-
-       pr_warn("Keyboard brightness level control not supported\n");
-}
-
-static struct led_classdev kbd_led = {
-       .name           = "dell::kbd_backlight",
-       .brightness_set = kbd_led_level_set,
-       .brightness_get = kbd_led_level_get,
-       .groups         = kbd_led_groups,
-};
-
-static int __init kbd_led_init(struct device *dev)
-{
-       kbd_init();
-       if (!kbd_led_present)
-               return -ENODEV;
-       kbd_led.max_brightness = kbd_get_max_level();
-       if (!kbd_led.max_brightness) {
-               kbd_led.max_brightness = kbd_get_valid_token_counts();
-               if (kbd_led.max_brightness)
-                       kbd_led.max_brightness--;
-       }
-       return led_classdev_register(dev, &kbd_led);
-}
-
-static void brightness_set_exit(struct led_classdev *led_cdev,
-                               enum led_brightness value)
-{
-       /* Don't change backlight level on exit */
-};
-
-static void kbd_led_exit(void)
-{
-       if (!kbd_led_present)
-               return;
-       kbd_led.brightness_set = brightness_set_exit;
-       led_classdev_unregister(&kbd_led);
-}
-
 static int __init dell_init(void)
 {
        int max_intensity = 0;
@@ -1879,8 +841,6 @@ static int __init dell_init(void)
        if (quirks && quirks->touchpad_led)
                touchpad_led_init(&platform_device->dev);
 
-       kbd_led_init(&platform_device->dev);
-
        dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
        if (dell_laptop_dir != NULL)
                debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -1948,7 +908,6 @@ static void __exit dell_exit(void)
        debugfs_remove_recursive(dell_laptop_dir);
        if (quirks && quirks->touchpad_led)
                touchpad_led_exit();
-       kbd_led_exit();
        i8042_remove_filter(dell_laptop_i8042_filter);
        cancel_delayed_work_sync(&dell_rfkill_work);
        backlight_device_unregister(dell_backlight_device);
@@ -1965,7 +924,5 @@ module_init(dell_init);
 module_exit(dell_exit);
 
 MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
 MODULE_DESCRIPTION("Dell laptop driver");
 MODULE_LICENSE("GPL");
index e225711bb8bc0009114d6ec0d95ab8d2bb2d68f3..9c48fb32f6601bf4065db65cd2a099bc57f3812d 100644 (file)
@@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
 }
 EXPORT_SYMBOL_GPL(regulator_get_optional);
 
-/* Locks held by regulator_put() */
+/* regulator_list_mutex lock held by regulator_put() */
 static void _regulator_put(struct regulator *regulator)
 {
        struct regulator_dev *rdev;
@@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator)
        /* remove any sysfs entries */
        if (regulator->dev)
                sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+       mutex_lock(&rdev->mutex);
        kfree(regulator->supply_name);
        list_del(&regulator->list);
        kfree(regulator);
 
        rdev->open_count--;
        rdev->exclusive = 0;
+       mutex_unlock(&rdev->mutex);
 
        module_put(rdev->owner);
 }
index 2809ae0d6bcd9848bd15cbc8d4d45e69df1601a3..ff828117798fd3f4775cd5cc6c8e86d8fbe33a00 100644 (file)
@@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops;
        .enable_mask    = S2MPS14_ENABLE_MASK                   \
 }
 
+#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) {        \
+       .name           = "BUCK"#num,                           \
+       .id             = S2MPS13_BUCK##num,                    \
+       .ops            = &s2mps14_reg_ops,                     \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = min,                                  \
+       .uV_step        = step,                                 \
+       .linear_min_sel = min_sel,                              \
+       .n_voltages     = S2MPS14_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPS13_BUCK_RAMP_DELAY,              \
+       .vsel_reg       = S2MPS13_REG_B1OUT + (num) * 2 - 1,    \
+       .vsel_mask      = S2MPS14_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPS13_REG_B1CTRL + (num - 1) * 2,   \
+       .enable_mask    = S2MPS14_ENABLE_MASK                   \
+}
+
+#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) {     \
+       .name           = "BUCK"#num,                           \
+       .id             = S2MPS13_BUCK##num,                    \
+       .ops            = &s2mps14_reg_ops,                     \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = min,                                  \
+       .uV_step        = step,                                 \
+       .linear_min_sel = min_sel,                              \
+       .n_voltages     = S2MPS14_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPS13_BUCK_RAMP_DELAY,              \
+       .vsel_reg       = S2MPS13_REG_B1OUT + (num) * 2 - 1,    \
+       .vsel_mask      = S2MPS14_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPS13_REG_B1CTRL + (num) * 2 - 1,   \
+       .enable_mask    = S2MPS14_ENABLE_MASK                   \
+}
+
 static const struct regulator_desc s2mps13_regulators[] = {
        regulator_desc_s2mps13_ldo(1,  MIN_800_MV,  STEP_12_5_MV, 0x00),
        regulator_desc_s2mps13_ldo(2,  MIN_1400_MV, STEP_50_MV,   0x0C),
@@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = {
        regulator_desc_s2mps13_buck(4,  MIN_500_MV,  STEP_6_25_MV, 0x10),
        regulator_desc_s2mps13_buck(5,  MIN_500_MV,  STEP_6_25_MV, 0x10),
        regulator_desc_s2mps13_buck(6,  MIN_500_MV,  STEP_6_25_MV, 0x10),
-       regulator_desc_s2mps13_buck(7,  MIN_500_MV,  STEP_6_25_MV, 0x10),
-       regulator_desc_s2mps13_buck(8,  MIN_1000_MV, STEP_12_5_MV, 0x20),
-       regulator_desc_s2mps13_buck(9,  MIN_1000_MV, STEP_12_5_MV, 0x20),
-       regulator_desc_s2mps13_buck(10, MIN_500_MV,  STEP_6_25_MV, 0x10),
+       regulator_desc_s2mps13_buck7(7,  MIN_500_MV,  STEP_6_25_MV, 0x10),
+       regulator_desc_s2mps13_buck8_10(8,  MIN_1000_MV, STEP_12_5_MV, 0x20),
+       regulator_desc_s2mps13_buck8_10(9,  MIN_1000_MV, STEP_12_5_MV, 0x20),
+       regulator_desc_s2mps13_buck8_10(10, MIN_500_MV,  STEP_6_25_MV, 0x10),
 };
 
 static int s2mps14_regulator_enable(struct regulator_dev *rdev)
index eebc52cb69849ea99f07c9c6694389a92e39afa5..3d95c87160b35003635460d5d67379d3da33d27f 100644 (file)
@@ -102,6 +102,8 @@ static int sunxi_reset_init(struct device_node *np)
                goto err_alloc;
        }
 
+       spin_lock_init(&data->lock);
+
        data->rcdev.owner = THIS_MODULE;
        data->rcdev.nr_resets = size * 32;
        data->rcdev.ops = &sunxi_reset_ops;
@@ -157,6 +159,8 @@ static int sunxi_reset_probe(struct platform_device *pdev)
        if (IS_ERR(data->membase))
                return PTR_ERR(data->membase);
 
+       spin_lock_init(&data->lock);
+
        data->rcdev.owner = THIS_MODULE;
        data->rcdev.nr_resets = resource_size(res) * 32;
        data->rcdev.ops = &sunxi_reset_ops;
index b5e7c4670205ba15e82cba6f2325d5cd5b994bcc..89ac1d5083c66093b63caa973e37ff4897a1bb6b 100644 (file)
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
 static const struct platform_device_id s5m_rtc_id[] = {
        { "s5m-rtc",            S5M8767X },
        { "s2mps14-rtc",        S2MPS14X },
+       { },
 };
 
 static struct platform_driver s5m_rtc_driver = {
index 91e97ec0141892cbf4d1676480d5fda3223b0e6b..4d41bf75c23318577638fa45493aab748e0473a7 100644 (file)
@@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id)
  */
 static inline int ap_test_config_domain(unsigned int domain)
 {
-       if (!ap_configuration)
-               return 1;
-       return ap_test_config(ap_configuration->aqm, domain);
+       if (!ap_configuration)    /* QCI not supported */
+               if (domain < 16)
+                       return 1; /* then domains 0...15 are configured */
+               else
+                       return 0;
+       else
+               return ap_test_config(ap_configuration->aqm, domain);
 }
 
 /**
index 213e54ee8a6660de47b05825f4ba5265bccd3463..d609ca09aa943754e45bc8ca815c7e668bdac985 100644 (file)
@@ -109,10 +109,8 @@ static debug_info_t *claw_dbf_trace;
 static void
 claw_unregister_debug_facility(void)
 {
-       if (claw_dbf_setup)
-               debug_unregister(claw_dbf_setup);
-       if (claw_dbf_trace)
-               debug_unregister(claw_dbf_trace);
+       debug_unregister(claw_dbf_setup);
+       debug_unregister(claw_dbf_trace);
 }
 
 static int
index fb92524d24ef9ac1e72a6bf0912a5092e15433fd..fd5944bbe224964a5afce64c1a82ba4d33c27dc2 100644 (file)
@@ -251,13 +251,11 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
        int first = 1;
        int i;
        unsigned long duration;
-       struct timespec done_stamp = current_kernel_time(); /* xtime */
+       unsigned long done_stamp = jiffies;
 
        CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
 
-       duration =
-           (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
-           (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
+       duration = done_stamp - ch->prof.send_stamp;
        if (duration > ch->prof.tx_time)
                ch->prof.tx_time = duration;
 
@@ -307,7 +305,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
                spin_unlock(&ch->collect_lock);
                ch->ccw[1].count = ch->trans_skb->len;
                fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
-               ch->prof.send_stamp = current_kernel_time(); /* xtime */
+               ch->prof.send_stamp = jiffies;
                rc = ccw_device_start(ch->cdev, &ch->ccw[0],
                                                (unsigned long)ch, 0xff, 0);
                ch->prof.doios_multi++;
@@ -1229,14 +1227,12 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
        int             rc;
        struct th_header *header;
        struct pdu      *p_header;
-       struct timespec done_stamp = current_kernel_time(); /* xtime */
+       unsigned long done_stamp = jiffies;
 
        CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
                        __func__, dev->name, smp_processor_id());
 
-       duration =
-               (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
-               (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
+       duration = done_stamp - ch->prof.send_stamp;
        if (duration > ch->prof.tx_time)
                ch->prof.tx_time = duration;
 
@@ -1361,7 +1357,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
 
        ch->ccw[1].count = ch->trans_skb->len;
        fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
-       ch->prof.send_stamp = current_kernel_time(); /* xtime */
+       ch->prof.send_stamp = jiffies;
        if (do_debug_ccw)
                ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
        rc = ccw_device_start(ch->cdev, &ch->ccw[0],
@@ -1827,7 +1823,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
        fsm_newstate(wch->fsm, CTC_STATE_TX);
 
        spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
-       wch->prof.send_stamp = current_kernel_time(); /* xtime */
+       wch->prof.send_stamp = jiffies;
        rc = ccw_device_start(wch->cdev, &wch->ccw[3],
                                        (unsigned long) wch, 0xff, 0);
        spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
index e056dd4fe44d1814d153d215af21fa13f44888a2..05c37d6d4afef27cdb182dbb7621ee7ef4fd2490 100644 (file)
@@ -567,7 +567,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
        fsm_newstate(ch->fsm, CTC_STATE_TX);
        fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
        spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
-       ch->prof.send_stamp = current_kernel_time(); /* xtime */
+       ch->prof.send_stamp = jiffies;
        rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
                                        (unsigned long)ch, 0xff, 0);
        spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
@@ -831,7 +831,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
                                        sizeof(struct ccw1) * 3);
 
        spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
-       ch->prof.send_stamp = current_kernel_time(); /* xtime */
+       ch->prof.send_stamp = jiffies;
        rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
                                        (unsigned long)ch, 0xff, 0);
        spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
index 477c933685f38f4abdba98a557aca20d43b7b7ef..6f4417c80247272a6621dcfd60ca396441e37a2f 100644 (file)
@@ -121,7 +121,7 @@ struct ctcm_profile {
        unsigned long doios_multi;
        unsigned long txlen;
        unsigned long tx_time;
-       struct timespec send_stamp;
+       unsigned long send_stamp;
 };
 
 /*
index 47773c4d235a7cc3787a116887e115d4d3997e21..ddb0aa321339f19cccd4741254f9fb3ebcf1b3d1 100644 (file)
@@ -100,8 +100,8 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
                     priv->channel[WRITE]->prof.doios_multi);
        p += sprintf(p, "  Netto bytes written: %ld\n",
                     priv->channel[WRITE]->prof.txlen);
-       p += sprintf(p, "  Max. TX IO-time: %ld\n",
-                    priv->channel[WRITE]->prof.tx_time);
+       p += sprintf(p, "  Max. TX IO-time: %u\n",
+                    jiffies_to_usecs(priv->channel[WRITE]->prof.tx_time));
 
        printk(KERN_INFO "Statistics for %s:\n%s",
                                priv->channel[CTCM_WRITE]->netdev->name, sbuf);
index 92190aa20b9fa1c8940bbdf79c2190bc99033e3d..00b7d9c9fe485a1c4553cda878b519526371e8b0 100644 (file)
@@ -88,10 +88,8 @@ static debug_info_t *lcs_dbf_trace;
 static void
 lcs_unregister_debug_facility(void)
 {
-       if (lcs_dbf_setup)
-               debug_unregister(lcs_dbf_setup);
-       if (lcs_dbf_trace)
-               debug_unregister(lcs_dbf_trace);
+       debug_unregister(lcs_dbf_setup);
+       debug_unregister(lcs_dbf_trace);
 }
 
 static int
index 0a87809c8af7380374b496ab3b90f76a8522ba98..33f7040d711d6f66aaf8dbc96987664bd197c134 100644 (file)
@@ -178,7 +178,7 @@ struct connection_profile {
        unsigned long doios_multi;
        unsigned long txlen;
        unsigned long tx_time;
-       struct timespec send_stamp;
+       unsigned long send_stamp;
        unsigned long tx_pending;
        unsigned long tx_max_pending;
 };
@@ -487,12 +487,9 @@ DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
 
 static void iucv_unregister_dbf_views(void)
 {
-       if (iucv_dbf_setup)
-               debug_unregister(iucv_dbf_setup);
-       if (iucv_dbf_data)
-               debug_unregister(iucv_dbf_data);
-       if (iucv_dbf_trace)
-               debug_unregister(iucv_dbf_trace);
+       debug_unregister(iucv_dbf_setup);
+       debug_unregister(iucv_dbf_data);
+       debug_unregister(iucv_dbf_trace);
 }
 static int iucv_register_dbf_views(void)
 {
@@ -786,7 +783,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 
        header.next = 0;
        memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
-       conn->prof.send_stamp = current_kernel_time();
+       conn->prof.send_stamp = jiffies;
        txmsg.class = 0;
        txmsg.tag = 0;
        rc = iucv_message_send(conn->path, &txmsg, 0, 0,
@@ -1220,7 +1217,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
                memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
 
                fsm_newstate(conn->fsm, CONN_STATE_TX);
-               conn->prof.send_stamp = current_kernel_time();
+               conn->prof.send_stamp = jiffies;
 
                msg.tag = 1;
                msg.class = 0;
index 7a8bb9f78e7639d915d478e0ee0cc11b3f905f4a..3abac028899f10d97d448122ce7696050f36dfe0 100644 (file)
@@ -596,7 +596,6 @@ struct qeth_channel {
        struct ccw1 ccw;
        spinlock_t iob_lock;
        wait_queue_head_t wait_q;
-       struct tasklet_struct irq_tasklet;
        struct ccw_device *ccwdev;
 /*command buffer for control data*/
        struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
index f407e3763432648f3271a80e2d432b7da8736c65..642c77c76b8432d07534e393335353cfbc27233a 100644 (file)
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
        QETH_DBF_TEXT(SETUP, 2, "idxanswr");
        card = CARD_FROM_CDEV(channel->ccwdev);
        iob = qeth_get_buffer(channel);
+       if (!iob)
+               return -ENOMEM;
        iob->callback = idx_reply_cb;
        memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
        channel->ccw.count = QETH_BUFSIZE;
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
        QETH_DBF_TEXT(SETUP, 2, "idxactch");
 
        iob = qeth_get_buffer(channel);
+       if (!iob)
+               return -ENOMEM;
        iob->callback = idx_reply_cb;
        memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
        channel->ccw.count = IDX_ACTIVATE_SIZE;
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
 }
 EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
 
+/**
+ * qeth_send_control_data() -  send control command to the card
+ * @card:                      qeth_card structure pointer
+ * @len:                       size of the command buffer
+ * @iob:                       qeth_cmd_buffer pointer
+ * @reply_cb:                  callback function pointer
+ * @cb_card:                   pointer to the qeth_card structure
+ * @cb_reply:                  pointer to the qeth_reply structure
+ * @cb_cmd:                    pointer to the original iob for non-IPA
+ *                             commands, or to the qeth_ipa_cmd structure
+ *                             for the IPA commands.
+ * @reply_param:               private pointer passed to the callback
+ *
+ * Returns the value of the `return_code' field of the response
+ * block returned from the hardware, or other error indication.
+ * Value of zero indicates successful execution of the command.
+ *
+ * Callback function gets called one or more times, with cb_cmd
+ * pointing to the response returned by the hardware. Callback
+ * function must return non-zero if more reply blocks are expected,
+ * and zero if the last or only reply block is received. Callback
+ * function can get the value of the reply_param pointer from the
+ * field 'param' of the structure qeth_reply.
+ */
+
 int qeth_send_control_data(struct qeth_card *card, int len,
                struct qeth_cmd_buffer *iob,
-               int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
-                       unsigned long),
+               int (*reply_cb)(struct qeth_card *cb_card,
+                               struct qeth_reply *cb_reply,
+                               unsigned long cb_cmd),
                void *reply_param)
 {
        int rc;
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
        struct qeth_cmd_buffer *iob;
        struct qeth_ipa_cmd *cmd;
 
-       iob = qeth_wait_for_buffer(&card->write);
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+       iob = qeth_get_buffer(&card->write);
+       if (iob) {
+               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+               qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+       } else {
+               dev_warn(&card->gdev->dev,
+                        "The qeth driver ran out of channel command buffers\n");
+               QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
+                                dev_name(&card->gdev->dev));
+       }
 
        return iob;
 }
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 }
 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
 
+/**
+ * qeth_send_ipa_cmd() - send an IPA command
+ *
+ * See qeth_send_control_data() for explanation of the arguments.
+ */
+
 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
                int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
                        unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
        QETH_DBF_TEXT(SETUP, 2, "strtlan");
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
        return rc;
 }
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
                                     QETH_PROT_IPV4);
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
-       cmd->data.setadapterparms.hdr.command_code = command;
-       cmd->data.setadapterparms.hdr.used_total = 1;
-       cmd->data.setadapterparms.hdr.seq_no = 1;
+       if (iob) {
+               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+               cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+               cmd->data.setadapterparms.hdr.command_code = command;
+               cmd->data.setadapterparms.hdr.used_total = 1;
+               cmd->data.setadapterparms.hdr.seq_no = 1;
+       }
 
        return iob;
 }
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
        QETH_CARD_TEXT(card, 3, "queryadp");
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
                                   sizeof(struct qeth_ipacmd_setadpparms));
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
        return rc;
 }
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
 
        QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
        return rc;
 }
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
                return -ENOMEDIUM;
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
                                sizeof(struct qeth_ipacmd_setadpparms_hdr));
+       if (!iob)
+               return -ENOMEM;
        return qeth_send_ipa_cmd(card, iob,
                                qeth_query_switch_attributes_cb, sw_info);
 }
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
 
        QETH_DBF_TEXT(SETUP, 2, "qdiagass");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.diagass.subcmd_len = 16;
        cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
 
        QETH_DBF_TEXT(SETUP, 2, "diagtrap");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.diagass.subcmd_len = 80;
        cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
                        sizeof(struct qeth_ipacmd_setadpparms));
+       if (!iob)
+               return;
        cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
        cmd->data.setadapterparms.data.mode = mode;
        qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
                                   sizeof(struct qeth_ipacmd_setadpparms));
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
        cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
                                   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
                                   sizeof(struct qeth_set_access_ctrl));
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
        access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
                                   QETH_SNMP_SETADP_CMDLENGTH + req_len);
+       if (!iob) {
+               rc = -ENOMEM;
+               goto out;
+       }
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
        rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
                if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
                        rc = -EFAULT;
        }
-
+out:
        kfree(ureq);
        kfree(qinfo.udata);
        return rc;
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
                                   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
                                   sizeof(struct qeth_query_oat));
+       if (!iob) {
+               rc = -ENOMEM;
+               goto out_free;
+       }
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        oat_req = &cmd->data.setadapterparms.data.query_oat;
        oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
                return -EOPNOTSUPP;
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
                sizeof(struct qeth_ipacmd_setadpparms_hdr));
+       if (!iob)
+               return -ENOMEM;
        return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
                                        (void *)carrier_info);
 }
@@ -5060,11 +5133,23 @@ retriable:
        card->options.adp.supported_funcs = 0;
        card->options.sbp.supported_funcs = 0;
        card->info.diagass_support = 0;
-       qeth_query_ipassists(card, QETH_PROT_IPV4);
-       if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
-               qeth_query_setadapterparms(card);
-       if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
-               qeth_query_setdiagass(card);
+       rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
+       if (rc == -ENOMEM)
+               goto out;
+       if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+               rc = qeth_query_setadapterparms(card);
+               if (rc < 0) {
+                       QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+                       goto out;
+               }
+       }
+       if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+               rc = qeth_query_setdiagass(card);
+               if (rc < 0) {
+                       QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+                       goto out;
+               }
+       }
        return 0;
 out:
        dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
index 15523f0e4c03666d77d7203102b8f3548cd86e3d..423bec56cffa09115610b05b908307462774e909 100644 (file)
@@ -231,7 +231,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
-       char *tmp;
        int rc = 0;
 
        if (!card)
@@ -253,36 +252,35 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
                goto out;
        }
 
-       tmp = strsep((char **) &buf, "\n");
-       if (!strcmp(tmp, "prio_queueing_prec")) {
+       if (sysfs_streq(buf, "prio_queueing_prec")) {
                card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
                card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
-       } else if (!strcmp(tmp, "prio_queueing_skb")) {
+       } else if (sysfs_streq(buf, "prio_queueing_skb")) {
                card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
                card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
-       } else if (!strcmp(tmp, "prio_queueing_tos")) {
+       } else if (sysfs_streq(buf, "prio_queueing_tos")) {
                card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
                card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
-       } else if (!strcmp(tmp, "prio_queueing_vlan")) {
+       } else if (sysfs_streq(buf, "prio_queueing_vlan")) {
                if (!card->options.layer2) {
                        rc = -ENOTSUPP;
                        goto out;
                }
                card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
                card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
-       } else if (!strcmp(tmp, "no_prio_queueing:0")) {
+       } else if (sysfs_streq(buf, "no_prio_queueing:0")) {
                card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
                card->qdio.default_out_queue = 0;
-       } else if (!strcmp(tmp, "no_prio_queueing:1")) {
+       } else if (sysfs_streq(buf, "no_prio_queueing:1")) {
                card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
                card->qdio.default_out_queue = 1;
-       } else if (!strcmp(tmp, "no_prio_queueing:2")) {
+       } else if (sysfs_streq(buf, "no_prio_queueing:2")) {
                card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
                card->qdio.default_out_queue = 2;
-       } else if (!strcmp(tmp, "no_prio_queueing:3")) {
+       } else if (sysfs_streq(buf, "no_prio_queueing:3")) {
                card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
                card->qdio.default_out_queue = 3;
-       } else if (!strcmp(tmp, "no_prio_queueing")) {
+       } else if (sysfs_streq(buf, "no_prio_queueing")) {
                card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
                card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
        } else
@@ -497,8 +495,6 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
        struct qeth_card *card = dev_get_drvdata(dev);
        enum qeth_ipa_isolation_modes isolation;
        int rc = 0;
-       char *tmp, *curtoken;
-       curtoken = (char *) buf;
 
        if (!card)
                return -EINVAL;
@@ -515,12 +511,11 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
        }
 
        /* parse input into isolation mode */
-       tmp = strsep(&curtoken, "\n");
-       if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
+       if (sysfs_streq(buf, ATTR_QETH_ISOLATION_NONE)) {
                isolation = ISOLATION_MODE_NONE;
-       } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
+       } else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_FWD)) {
                isolation = ISOLATION_MODE_FWD;
-       } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
+       } else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_DROP)) {
                isolation = ISOLATION_MODE_DROP;
        } else {
                rc = -EINVAL;
@@ -531,8 +526,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
        /* defer IP assist if device is offline (until discipline->set_online)*/
        card->options.prev_isolation = card->options.isolation;
        card->options.isolation = isolation;
-       if (card->state == CARD_STATE_SOFTSETUP ||
-           card->state == CARD_STATE_UP) {
+       if (qeth_card_hw_is_reachable(card)) {
                int ipa_rc = qeth_set_access_ctrl_online(card, 1);
                if (ipa_rc != 0)
                        rc = ipa_rc;
@@ -555,7 +549,7 @@ static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
        if (!card)
                return -EINVAL;
 
-       if (card->state != CARD_STATE_SOFTSETUP && card->state != CARD_STATE_UP)
+       if (!qeth_card_hw_is_reachable(card))
                return sprintf(buf, "n/a\n");
 
        rc = qeth_query_switch_attributes(card, &sw_info);
@@ -598,19 +592,16 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
 {
        struct qeth_card *card = dev_get_drvdata(dev);
        int rc = 0;
-       char *tmp, *curtoken;
        int state = 0;
-       curtoken = (char *)buf;
 
        if (!card)
                return -EINVAL;
 
        mutex_lock(&card->conf_mutex);
-       if (card->state == CARD_STATE_SOFTSETUP || card->state == CARD_STATE_UP)
+       if (qeth_card_hw_is_reachable(card))
                state = 1;
-       tmp = strsep(&curtoken, "\n");
 
-       if (!strcmp(tmp, "arm") && !card->info.hwtrap) {
+       if (sysfs_streq(buf, "arm") && !card->info.hwtrap) {
                if (state) {
                        if (qeth_is_diagass_supported(card,
                            QETH_DIAGS_CMD_TRAP)) {
@@ -621,14 +612,14 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
                                rc = -EINVAL;
                } else
                        card->info.hwtrap = 1;
-       } else if (!strcmp(tmp, "disarm") && card->info.hwtrap) {
+       } else if (sysfs_streq(buf, "disarm") && card->info.hwtrap) {
                if (state) {
                        rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
                        if (!rc)
                                card->info.hwtrap = 0;
                } else
                        card->info.hwtrap = 0;
-       } else if (!strcmp(tmp, "trap") && state && card->info.hwtrap)
+       } else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap)
                rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE);
        else
                rc = -EINVAL;
index d02cd1a679432fc7ef485295d308bac046f96df9..0ea0869120cf4762db9223af4ddd8e78f6f3a1db 100644 (file)
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
 static int qeth_l2_stop(struct net_device *);
 static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
 static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
-                          enum qeth_ipa_cmds,
-                          int (*reply_cb) (struct qeth_card *,
-                                           struct qeth_reply*,
-                                           unsigned long));
+                          enum qeth_ipa_cmds);
 static void qeth_l2_set_multicast_list(struct net_device *);
 static int qeth_l2_recover(void *);
 static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -48,8 +45,7 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        if (!card)
                return -ENODEV;
 
-       if ((card->state != CARD_STATE_UP) &&
-               (card->state != CARD_STATE_SOFTSETUP))
+       if (!qeth_card_hw_is_reachable(card))
                return -ENODEV;
 
        if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -130,56 +126,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
        return ndev;
 }
 
-static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
-                               struct qeth_reply *reply,
-                               unsigned long data)
+static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
 {
-       struct qeth_ipa_cmd *cmd;
-       __u8 *mac;
+       int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Sgmacb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       mac = &cmd->data.setdelmac.mac[0];
-       /* MAC already registered, needed in couple/uncouple case */
-       if (cmd->hdr.return_code ==  IPA_RC_L2_DUP_MAC) {
-               QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
-                         mac, QETH_CARD_IFNAME(card));
-               cmd->hdr.return_code = 0;
+       if (retcode)
+               QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+       switch (retcode) {
+       case IPA_RC_SUCCESS:
+               rc = 0;
+               break;
+       case IPA_RC_L2_UNSUPPORTED_CMD:
+               rc = -ENOSYS;
+               break;
+       case IPA_RC_L2_ADDR_TABLE_FULL:
+               rc = -ENOSPC;
+               break;
+       case IPA_RC_L2_DUP_MAC:
+       case IPA_RC_L2_DUP_LAYER3_MAC:
+               rc = -EEXIST;
+               break;
+       case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+       case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+               rc = -EPERM;
+               break;
+       case IPA_RC_L2_MAC_NOT_FOUND:
+               rc = -ENOENT;
+               break;
+       case -ENOMEM:
+               rc = -ENOMEM;
+               break;
+       default:
+               rc = -EIO;
+               break;
        }
-       if (cmd->hdr.return_code)
-               QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
-                         mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
-       return 0;
+       return rc;
 }
 
 static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
 {
-       QETH_CARD_TEXT(card, 2, "L2Sgmac");
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
-                                         qeth_l2_send_setgroupmac_cb);
-}
-
-static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
-                               struct qeth_reply *reply,
-                               unsigned long data)
-{
-       struct qeth_ipa_cmd *cmd;
-       __u8 *mac;
+       int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Dgmacb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       mac = &cmd->data.setdelmac.mac[0];
-       if (cmd->hdr.return_code)
-               QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
-                         mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
-       return 0;
+       QETH_CARD_TEXT(card, 2, "L2Sgmac");
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_SETGMAC));
+       if (rc == -EEXIST)
+               QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
+                       mac, QETH_CARD_IFNAME(card));
+       else if (rc)
+               QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
+                       mac, QETH_CARD_IFNAME(card), rc);
+       return rc;
 }
 
 static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
 {
+       int rc;
+
        QETH_CARD_TEXT(card, 2, "L2Dgmac");
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
-                                         qeth_l2_send_delgroupmac_cb);
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_DELGMAC));
+       if (rc)
+               QETH_DBF_MESSAGE(2,
+                       "Could not delete group MAC %pM on %s: %d\n",
+                       mac, QETH_CARD_IFNAME(card), rc);
+       return rc;
 }
 
 static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
@@ -197,10 +208,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
        mc->is_vmac = vmac;
 
        if (vmac) {
-               rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
-                                       NULL);
+               rc = qeth_setdel_makerc(card,
+                       qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
        } else {
-               rc = qeth_l2_send_setgroupmac(card, mac);
+               rc = qeth_setdel_makerc(card,
+                       qeth_l2_send_setgroupmac(card, mac));
        }
 
        if (!rc)
@@ -218,7 +230,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
                if (del) {
                        if (mc->is_vmac)
                                qeth_l2_send_setdelmac(card, mc->mc_addr,
-                                       IPA_CMD_DELVMAC, NULL);
+                                       IPA_CMD_DELVMAC);
                        else
                                qeth_l2_send_delgroupmac(card, mc->mc_addr);
                }
@@ -291,6 +303,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
 
        QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
        iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setdelvlan.vlan_id = i;
        return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +327,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
 {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_vlan_vid *id;
+       int rc;
 
        QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
        if (!vid)
@@ -328,7 +343,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
        id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
        if (id) {
                id->vid = vid;
-               qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+               rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+               if (rc) {
+                       kfree(id);
+                       return rc;
+               }
                spin_lock_bh(&card->vlanlock);
                list_add_tail(&id->list, &card->vid_list);
                spin_unlock_bh(&card->vlanlock);
@@ -343,6 +362,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
 {
        struct qeth_vlan_vid *id, *tmpid = NULL;
        struct qeth_card *card = dev->ml_priv;
+       int rc = 0;
 
        QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
        if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +383,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
        }
        spin_unlock_bh(&card->vlanlock);
        if (tmpid) {
-               qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+               rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
                kfree(tmpid);
        }
        qeth_l2_set_multicast_list(card->dev);
-       return 0;
+       return rc;
 }
 
 static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +559,62 @@ out:
 }
 
 static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
-                          enum qeth_ipa_cmds ipacmd,
-                          int (*reply_cb) (struct qeth_card *,
-                                           struct qeth_reply*,
-                                           unsigned long))
+                          enum qeth_ipa_cmds ipacmd)
 {
        struct qeth_ipa_cmd *cmd;
        struct qeth_cmd_buffer *iob;
 
        QETH_CARD_TEXT(card, 2, "L2sdmac");
        iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
        memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
-       return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
+       return qeth_send_ipa_cmd(card, iob, NULL, NULL);
 }
 
-static int qeth_l2_send_setmac_cb(struct qeth_card *card,
-                          struct qeth_reply *reply,
-                          unsigned long data)
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 {
-       struct qeth_ipa_cmd *cmd;
+       int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Smaccb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       if (cmd->hdr.return_code) {
-               QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
+       QETH_CARD_TEXT(card, 2, "L2Setmac");
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_SETVMAC));
+       if (rc == 0) {
+               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+               memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+               dev_info(&card->gdev->dev,
+                       "MAC address %pM successfully registered on device %s\n",
+                       card->dev->dev_addr, card->dev->name);
+       } else {
                card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-               switch (cmd->hdr.return_code) {
-               case IPA_RC_L2_DUP_MAC:
-               case IPA_RC_L2_DUP_LAYER3_MAC:
+               switch (rc) {
+               case -EEXIST:
                        dev_warn(&card->gdev->dev,
-                               "MAC address %pM already exists\n",
-                               cmd->data.setdelmac.mac);
+                               "MAC address %pM already exists\n", mac);
                        break;
-               case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
-               case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+               case -EPERM:
                        dev_warn(&card->gdev->dev,
-                               "MAC address %pM is not authorized\n",
-                               cmd->data.setdelmac.mac);
-                       break;
-               default:
+                               "MAC address %pM is not authorized\n", mac);
                        break;
                }
-       } else {
-               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-               memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
-                      OSA_ADDR_LEN);
-               dev_info(&card->gdev->dev,
-                       "MAC address %pM successfully registered on device %s\n",
-                       card->dev->dev_addr, card->dev->name);
-       }
-       return 0;
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
-       QETH_CARD_TEXT(card, 2, "L2Setmac");
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
-                                         qeth_l2_send_setmac_cb);
-}
-
-static int qeth_l2_send_delmac_cb(struct qeth_card *card,
-                          struct qeth_reply *reply,
-                          unsigned long data)
-{
-       struct qeth_ipa_cmd *cmd;
-
-       QETH_CARD_TEXT(card, 2, "L2Dmaccb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       if (cmd->hdr.return_code) {
-               QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
-               return 0;
        }
-       card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-
-       return 0;
+       return rc;
 }
 
 static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
 {
+       int rc;
+
        QETH_CARD_TEXT(card, 2, "L2Delmac");
        if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
                return 0;
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
-                                         qeth_l2_send_delmac_cb);
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_DELVMAC));
+       if (rc == 0)
+               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+       return rc;
 }
 
 static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -651,7 +642,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
                if (rc) {
                        QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
                                "device %s: x%x\n", CARD_BUS_ID(card), rc);
-                       QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
                        return rc;
                }
                QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
@@ -687,7 +678,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -ERESTARTSYS;
        }
        rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
-       if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
+       if (!rc || (rc == -ENOENT))
                rc = qeth_l2_send_setmac(card, addr->sa_data);
        return rc ? -EINVAL : 0;
 }
@@ -996,7 +987,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        recover_flag = card->state;
        rc = qeth_core_hardsetup_card(card);
        if (rc) {
-               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+               QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
                goto out_remove;
        }
@@ -1344,8 +1335,7 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
        if (!card)
                return -ENODEV;
        QETH_CARD_TEXT(card, 2, "osnsdmc");
-       if ((card->state != CARD_STATE_UP) &&
-           (card->state != CARD_STATE_SOFTSETUP))
+       if (!qeth_card_hw_is_reachable(card))
                return -ENODEV;
        iob = qeth_wait_for_buffer(&card->write);
        memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
@@ -1730,6 +1720,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
 
        QETH_CARD_TEXT(card, 2, "brqsuppo");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       if (!iob)
+               return;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.sbp.hdr.cmdlength =
                sizeof(struct qeth_ipacmd_sbp_hdr) +
@@ -1805,6 +1797,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
        if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
                return -EOPNOTSUPP;
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.sbp.hdr.cmdlength =
                sizeof(struct qeth_ipacmd_sbp_hdr);
@@ -1817,9 +1811,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
        if (rc)
                return rc;
        rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
-       if (rc)
-               return rc;
-       return 0;
+       return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
 
@@ -1873,6 +1865,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
        if (!(card->options.sbp.supported_funcs & setcmd))
                return -EOPNOTSUPP;
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.sbp.hdr.cmdlength = cmdlength;
        cmd->data.sbp.hdr.command_code = setcmd;
index 625227ad16ee91cd2b4ca1aaa8a9541408e2a838..04e42c649134c1989a3485b01ff2a43cc380a82d 100644 (file)
@@ -432,10 +432,8 @@ void qeth_l3_set_ip_addr_list(struct qeth_card *card)
        QETH_CARD_TEXT(card, 2, "sdiplist");
        QETH_CARD_HEX(card, 2, &card, sizeof(void *));
 
-       if ((card->state != CARD_STATE_UP &&
-            card->state != CARD_STATE_SOFTSETUP) || card->options.sniffer) {
+       if (!qeth_card_hw_is_reachable(card) || card->options.sniffer)
                return;
-       }
 
        spin_lock_irqsave(&card->ip_lock, flags);
        tbd_list = card->ip_tbd_list;
@@ -549,6 +547,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
        QETH_CARD_TEXT(card, 4, "setdelmc");
 
        iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
        if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +588,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
        QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
 
        iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        if (addr->proto == QETH_PROT_IPV6) {
                memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +618,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 4, "setroutg");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setrtg.type = (type);
        rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1053,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
        QETH_CARD_TEXT(card, 4, "getasscm");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
 
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       cmd->data.setassparms.hdr.assist_no = ipa_func;
-       cmd->data.setassparms.hdr.length = 8 + len;
-       cmd->data.setassparms.hdr.command_code = cmd_code;
-       cmd->data.setassparms.hdr.return_code = 0;
-       cmd->data.setassparms.hdr.seq_no = 0;
+       if (iob) {
+               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+               cmd->data.setassparms.hdr.assist_no = ipa_func;
+               cmd->data.setassparms.hdr.length = 8 + len;
+               cmd->data.setassparms.hdr.command_code = cmd_code;
+               cmd->data.setassparms.hdr.return_code = 0;
+               cmd->data.setassparms.hdr.seq_no = 0;
+       }
 
        return iob;
 }
@@ -1090,6 +1096,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
        QETH_CARD_TEXT(card, 4, "simassp6");
        iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
                                       0, QETH_PROT_IPV6);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob, 0, 0,
                                   qeth_l3_default_setassparms_cb, NULL);
        return rc;
@@ -1108,6 +1116,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
                length = sizeof(__u32);
        iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
                                       length, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob, length, data,
                                   qeth_l3_default_setassparms_cb, NULL);
        return rc;
@@ -1494,6 +1504,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
                                     QETH_PROT_IPV6);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
                        card->info.unique_id;
@@ -1537,6 +1549,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
                                     QETH_PROT_IPV6);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
                        card->info.unique_id;
@@ -1611,6 +1625,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
        QETH_DBF_TEXT(SETUP, 2, "diagtrac");
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.diagass.subcmd_len = 16;
        cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2458,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
                        IPA_CMD_ASS_ARP_QUERY_INFO,
                        sizeof(struct qeth_arp_query_data) - sizeof(char),
                        prot);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
        cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2553,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
                                       IPA_CMD_ASS_ARP_ADD_ENTRY,
                                       sizeof(struct qeth_arp_cache_entry),
                                       QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob,
                                   sizeof(struct qeth_arp_cache_entry),
                                   (unsigned long) entry,
@@ -2574,6 +2594,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
                                       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
                                       12,
                                       QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob,
                                   12, (unsigned long)buf,
                                   qeth_l3_default_setassparms_cb, NULL);
@@ -2626,8 +2648,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        if (!card)
                return -ENODEV;
 
-       if ((card->state != CARD_STATE_UP) &&
-               (card->state != CARD_STATE_SOFTSETUP))
+       if (!qeth_card_hw_is_reachable(card))
                return -ENODEV;
 
        switch (cmd) {
@@ -2800,12 +2821,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
         * before we're going to overwrite this location with next hop ip.
         * v6 uses passthrough, v4 sets the tag in the QDIO header.
         */
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
                        hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
                else
                        hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
-               hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
+               hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
        }
 
        hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
@@ -2986,7 +3007,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        skb_pull(new_skb, ETH_HLEN);
                }
 
-               if (ipv != 4 && vlan_tx_tag_present(new_skb)) {
+               if (ipv != 4 && skb_vlan_tag_present(new_skb)) {
                        skb_push(new_skb, VLAN_HLEN);
                        skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
                        skb_copy_to_linear_data_offset(new_skb, 4,
@@ -2995,7 +3016,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                new_skb->data + 12, 4);
                        tag = (u16 *)(new_skb->data + 12);
                        *tag = __constant_htons(ETH_P_8021Q);
-                       *(tag + 1) = htons(vlan_tx_tag_get(new_skb));
+                       *(tag + 1) = htons(skb_vlan_tag_get(new_skb));
                }
        }
 
@@ -3262,6 +3283,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 
 static int qeth_l3_setup_netdev(struct qeth_card *card)
 {
+       int rc;
+
        if (card->info.type == QETH_CARD_TYPE_OSD ||
            card->info.type == QETH_CARD_TYPE_OSX) {
                if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3316,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                        return -ENODEV;
                card->dev->flags |= IFF_NOARP;
                card->dev->netdev_ops = &qeth_l3_netdev_ops;
-               qeth_l3_iqd_read_initial_mac(card);
+               rc = qeth_l3_iqd_read_initial_mac(card);
+               if (rc)
+                       return rc;
                if (card->options.hsuid[0])
                        memcpy(card->dev->perm_addr, card->options.hsuid, 9);
        } else
@@ -3360,7 +3385,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        recover_flag = card->state;
        rc = qeth_core_hardsetup_card(card);
        if (rc) {
-               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+               QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
                goto out_remove;
        }
@@ -3401,7 +3426,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 contin:
        rc = qeth_l3_setadapter_parms(card);
        if (rc)
-               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+               QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
        if (!card->options.sniffer) {
                rc = qeth_l3_start_ipassists(card);
                if (rc) {
@@ -3410,10 +3435,10 @@ contin:
                }
                rc = qeth_l3_setrouting_v4(card);
                if (rc)
-                       QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
                rc = qeth_l3_setrouting_v6(card);
                if (rc)
-                       QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
        }
        netif_tx_disable(card->dev);
 
index adef5f5de118a7ed9b320b3cea07672bc511ec36..386eb7b89b1ea23b58483284410dba10349c5dcd 100644 (file)
@@ -57,29 +57,26 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
                const char *buf, size_t count)
 {
        enum qeth_routing_types old_route_type = route->type;
-       char *tmp;
        int rc = 0;
 
-       tmp = strsep((char **) &buf, "\n");
        mutex_lock(&card->conf_mutex);
-       if (!strcmp(tmp, "no_router")) {
+       if (sysfs_streq(buf, "no_router")) {
                route->type = NO_ROUTER;
-       } else if (!strcmp(tmp, "primary_connector")) {
+       } else if (sysfs_streq(buf, "primary_connector")) {
                route->type = PRIMARY_CONNECTOR;
-       } else if (!strcmp(tmp, "secondary_connector")) {
+       } else if (sysfs_streq(buf, "secondary_connector")) {
                route->type = SECONDARY_CONNECTOR;
-       } else if (!strcmp(tmp, "primary_router")) {
+       } else if (sysfs_streq(buf, "primary_router")) {
                route->type = PRIMARY_ROUTER;
-       } else if (!strcmp(tmp, "secondary_router")) {
+       } else if (sysfs_streq(buf, "secondary_router")) {
                route->type = SECONDARY_ROUTER;
-       } else if (!strcmp(tmp, "multicast_router")) {
+       } else if (sysfs_streq(buf, "multicast_router")) {
                route->type = MULTICAST_ROUTER;
        } else {
                rc = -EINVAL;
                goto out;
        }
-       if (((card->state == CARD_STATE_SOFTSETUP) ||
-            (card->state == CARD_STATE_UP)) &&
+       if (qeth_card_hw_is_reachable(card) &&
            (old_route_type != route->type)) {
                if (prot == QETH_PROT_IPV4)
                        rc = qeth_l3_setrouting_v4(card);
@@ -371,7 +368,6 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
 {
        struct qeth_card *card = dev_get_drvdata(dev);
        struct qeth_ipaddr *tmpipa, *t;
-       char *tmp;
        int rc = 0;
 
        if (!card)
@@ -384,10 +380,9 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
                goto out;
        }
 
-       tmp = strsep((char **) &buf, "\n");
-       if (!strcmp(tmp, "toggle")) {
+       if (sysfs_streq(buf, "toggle")) {
                card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
-       } else if (!strcmp(tmp, "1")) {
+       } else if (sysfs_streq(buf, "1")) {
                card->ipato.enabled = 1;
                list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
                        if ((tmpipa->type == QETH_IP_TYPE_NORMAL) &&
@@ -396,7 +391,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
                                        QETH_IPA_SETIP_TAKEOVER_FLAG;
                }
 
-       } else if (!strcmp(tmp, "0")) {
+       } else if (sysfs_streq(buf, "0")) {
                card->ipato.enabled = 0;
                list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
                        if (tmpipa->set_flags &
@@ -431,21 +426,19 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
                                const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
-       char *tmp;
        int rc = 0;
 
        if (!card)
                return -EINVAL;
 
        mutex_lock(&card->conf_mutex);
-       tmp = strsep((char **) &buf, "\n");
-       if (!strcmp(tmp, "toggle")) {
+       if (sysfs_streq(buf, "toggle"))
                card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
-       } else if (!strcmp(tmp, "1")) {
+       else if (sysfs_streq(buf, "1"))
                card->ipato.invert4 = 1;
-       } else if (!strcmp(tmp, "0")) {
+       else if (sysfs_streq(buf, "0"))
                card->ipato.invert4 = 0;
-       else
+       else
                rc = -EINVAL;
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
@@ -613,21 +606,19 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
-       char *tmp;
        int rc = 0;
 
        if (!card)
                return -EINVAL;
 
        mutex_lock(&card->conf_mutex);
-       tmp = strsep((char **) &buf, "\n");
-       if (!strcmp(tmp, "toggle")) {
+       if (sysfs_streq(buf, "toggle"))
                card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
-       } else if (!strcmp(tmp, "1")) {
+       else if (sysfs_streq(buf, "1"))
                card->ipato.invert6 = 1;
-       } else if (!strcmp(tmp, "0")) {
+       else if (sysfs_streq(buf, "0"))
                card->ipato.invert6 = 0;
-       else
+       else
                rc = -EINVAL;
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
index 913b9a92fb0641a57f3cdd109ff699f4f230a37c..3681a3fbd4991795edecf7d98cbf57ab645910da 100644 (file)
@@ -8,5 +8,5 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
 obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
 
 csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
-               csio_hw.o csio_hw_t4.o csio_hw_t5.o csio_isr.o \
+               csio_hw.o csio_hw_t5.o csio_isr.o \
                csio_mb.o csio_rnode.o csio_wr.o
index 9ab997e18b20bce936bb7b0c34ee040edc2721f5..2e66f34ebb79c9caba0b40027e412b17d85dcce6 100644 (file)
@@ -60,37 +60,10 @@ int csio_msi = 2;
 static int dev_num;
 
 /* FCoE Adapter types & its description */
-static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
-       {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
-       {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
-       {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
-       {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
-       {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
-       {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
-       {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
-       {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
-       {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
-       {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
-       {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
-       {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
-       {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
-       {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
-       {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
-       {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
-       {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
-       {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
-       {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
-       {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
-       {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
-       {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
-       {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
-       {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
-};
-
 static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
        {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
        {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
-       {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"},
+       {"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
        {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
        {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
        {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
@@ -107,7 +80,9 @@ static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
        {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
        {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
        {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
-       {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
+       {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
+       {"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
+       {"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
 };
 
 static void csio_mgmtm_cleanup(struct csio_mgmtm *);
@@ -188,9 +163,9 @@ void
 csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
                        unsigned int mask, unsigned int val)
 {
-       csio_wr_reg32(hw, addr, TP_PIO_ADDR);
-       val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
-       csio_wr_reg32(hw, val, TP_PIO_DATA);
+       csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
+       val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
+       csio_wr_reg32(hw, val, TP_PIO_DATA_A);
 }
 
 void
@@ -256,7 +231,7 @@ csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
        }
 
        pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
-       *data = le32_to_cpu(*data);
+       *data = le32_to_cpu(*(__le32 *)data);
 
        return 0;
 }
@@ -421,17 +396,15 @@ csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
 
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-       if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+       if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;
 
-       cont = cont ? SF_CONT : 0;
-       lock = lock ? SF_LOCK : 0;
-
-       csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
-       ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
-                                        10, NULL);
+       csio_wr_reg32(hw,  SF_LOCK_V(lock) | SF_CONT_V(cont) |
+                     BYTECNT_V(byte_cnt - 1), SF_OP_A);
+       ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
+                                      10, NULL);
        if (!ret)
-               *valp = csio_rd_reg32(hw, SF_DATA);
+               *valp = csio_rd_reg32(hw, SF_DATA_A);
        return ret;
 }
 
@@ -453,16 +426,14 @@ csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
 {
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
-       if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+       if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
                return -EBUSY;
 
-       cont = cont ? SF_CONT : 0;
-       lock = lock ? SF_LOCK : 0;
-
-       csio_wr_reg32(hw, val, SF_DATA);
-       csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);
+       csio_wr_reg32(hw, val, SF_DATA_A);
+       csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
+                     OP_V(1) | SF_LOCK_V(lock), SF_OP_A);
 
-       return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+       return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
                                        10, NULL);
 }
 
@@ -533,11 +504,11 @@ csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
        for ( ; nwords; nwords--, data++) {
                ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
                if (nwords == 1)
-                       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+                       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
                if (ret)
                        return ret;
                if (byte_oriented)
-                       *data = htonl(*data);
+                       *data = (__force __u32) htonl(*data);
        }
        return 0;
 }
@@ -586,7 +557,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
        if (ret)
                goto unlock;
 
-       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
 
        /* Read the page to verify the write succeeded */
        ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -603,7 +574,7 @@ csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
        return 0;
 
 unlock:
-       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
        return ret;
 }
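
/* Editorial sketch (not part of the patch): the write-then-verify pattern
 * csio_hw_write_flash() follows above -- program a page, unlock SF_OP_A,
 * read the page back, and compare.  flash_write_page(), flash_read_page()
 * and PAGE_SZ are hypothetical stand-ins for the SF primitives and
 * SF_PAGE_SIZE.
 */
#include <stdint.h>
#include <string.h>

#define PAGE_SZ 256                             /* assumed flash page size */

extern int flash_write_page(uint32_t addr, const uint8_t *buf, size_t len);
extern int flash_read_page(uint32_t addr, uint8_t *buf, size_t len);

static int write_page_verified(uint32_t addr, const uint8_t *buf, size_t len)
{
	uint8_t check[PAGE_SZ];
	int ret = flash_write_page(addr, buf, len);

	if (ret == 0)                           /* read back the whole page */
		ret = flash_read_page(addr & ~(uint32_t)0xff, check, PAGE_SZ);
	if (ret == 0 && memcmp(check + (addr & 0xff), buf, len) != 0)
		ret = -1;                       /* silent corruption caught */
	return ret;
}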
 
@@ -641,7 +612,7 @@ out:
        if (ret)
                csio_err(hw, "erase of flash sector %d failed, error %d\n",
                         start, ret);
-       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
        return 0;
 }
 
@@ -665,7 +636,7 @@ csio_hw_print_fw_version(struct csio_hw *hw, char *str)
 static int
 csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
 {
-       return csio_hw_read_flash(hw, FW_IMG_START +
+       return csio_hw_read_flash(hw, FLASH_FW_START +
                                  offsetof(struct fw_hdr, fw_ver), 1,
                                  vers, 0);
 }
@@ -685,43 +656,6 @@ csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
                        vers, 0);
 }
 
-/*
- *     csio_hw_check_fw_version - check if the FW is compatible with
- *                                this driver
- *     @hw: HW module
- *
- *     Checks if an adapter's FW is compatible with the driver.  Returns 0
- *     if there's an exact match, or a negative error if the version could
- *     not be read or there's a major or minor version mismatch.
- */
-static int
-csio_hw_check_fw_version(struct csio_hw *hw)
-{
-       int ret, major, minor, micro;
-
-       ret = csio_hw_get_fw_version(hw, &hw->fwrev);
-       if (!ret)
-               ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
-       if (ret)
-               return ret;
-
-       major = FW_HDR_FW_VER_MAJOR_G(hw->fwrev);
-       minor = FW_HDR_FW_VER_MINOR_G(hw->fwrev);
-       micro = FW_HDR_FW_VER_MICRO_G(hw->fwrev);
-
-       if (major != FW_VERSION_MAJOR(hw)) {    /* major mismatch - fail */
-               csio_err(hw, "card FW has major version %u, driver wants %u\n",
-                        major, FW_VERSION_MAJOR(hw));
-               return -EINVAL;
-       }
-
-       if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
-               return 0;        /* perfect match */
-
-       /* Minor/micro version mismatch */
-       return -EINVAL;
-}
-
 /*
  * csio_hw_fw_dload - download firmware.
  * @hw: HW module
@@ -762,9 +696,9 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
                return -EINVAL;
        }
 
-       if (size > FW_MAX_SIZE) {
+       if (size > FLASH_FW_MAX_SIZE) {
                csio_err(hw, "FW image too large, max is %u bytes\n",
-                           FW_MAX_SIZE);
+                           FLASH_FW_MAX_SIZE);
                return -EINVAL;
        }
 
@@ -780,10 +714,10 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
        i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
 
        csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
-                         FW_START_SEC, FW_START_SEC + i - 1);
+                         FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);
 
-       ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
-                                         FW_START_SEC + i - 1);
+       ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
+                                         FLASH_FW_START_SEC + i - 1);
        if (ret) {
                csio_err(hw, "Flash Erase failed\n");
                goto out;
@@ -796,14 +730,14 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
         */
        memcpy(first_page, fw_data, SF_PAGE_SIZE);
        ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
-       ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
+       ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
        if (ret)
                goto out;
 
        csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
                    FW_IMG_START, FW_IMG_START + size);
 
-       addr = FW_IMG_START;
+       addr = FLASH_FW_START;
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                fw_data += SF_PAGE_SIZE;
@@ -813,7 +747,7 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
        }
 
        ret = csio_hw_write_flash(hw,
-                                 FW_IMG_START +
+                                 FLASH_FW_START +
                                        offsetof(struct fw_hdr, fw_ver),
                                  sizeof(hdr->fw_ver),
                                  (const uint8_t *)&hdr->fw_ver);
@@ -833,7 +767,7 @@ csio_hw_get_flash_params(struct csio_hw *hw)
        ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
-       csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
+       csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
        if (ret != 0)
                return ret;
 
@@ -861,17 +795,17 @@ csio_hw_dev_ready(struct csio_hw *hw)
        uint32_t reg;
        int cnt = 6;
 
-       while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
-                                                               (--cnt != 0))
+       while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
+              (--cnt != 0))
                mdelay(100);
 
-       if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
-                           (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
+       if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
+                          (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
                csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
                return -EIO;
        }
 
-       hw->pfn = SOURCEPF_GET(reg);
+       hw->pfn = SOURCEPF_G(reg);
 
        return 0;
 }
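
/* Editorial sketch (not part of the patch): the bounded-poll idiom in
 * csio_hw_dev_ready() above.  A device still coming out of reset reads back
 * all-ones on PL_WHOAMI_A, so the driver retries with a delay before
 * extracting its PF number.  whoami_read(), delay_ms() and the bit position
 * assumed for SOURCEPF_G() are hypothetical stand-ins.
 */
#include <stdint.h>

extern uint32_t whoami_read(void);
extern void delay_ms(unsigned int ms);

static int wait_dev_ready(uint32_t *pfn)
{
	uint32_t reg;
	int cnt = 6;                            /* ~600 ms worst case */

	while ((reg = whoami_read()) == 0xFFFFFFFF && --cnt != 0)
		delay_ms(100);                  /* bus not answering yet */
	if (cnt == 0)
		return -1;                      /* device never came back */
	*pfn = (reg >> 8) & 0x7;                /* SOURCEPF_G(): assumed bits */
	return 0;
}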
@@ -959,8 +893,8 @@ retry:
                         * timeout ... and then retry if we haven't exhausted
                         * our retries ...
                         */
-                       pcie_fw = csio_rd_reg32(hw, PCIE_FW);
-                       if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+                       pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
+                       if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
                                if (waiting <= 0) {
                                        if (retries-- > 0)
                                                goto retry;
@@ -976,10 +910,10 @@ retry:
                         * report errors preferentially.
                         */
                        if (state) {
-                               if (pcie_fw & PCIE_FW_ERR) {
+                               if (pcie_fw & PCIE_FW_ERR_F) {
                                        *state = CSIO_DEV_STATE_ERR;
                                        rv = -ETIMEDOUT;
-                               } else if (pcie_fw & PCIE_FW_INIT)
+                               } else if (pcie_fw & PCIE_FW_INIT_F)
                                        *state = CSIO_DEV_STATE_INIT;
                        }
 
@@ -988,9 +922,9 @@ retry:
                         * there's not a valid Master PF, grab its identity
                         * for our caller.
                         */
-                       if (mpfn == PCIE_FW_MASTER_MASK &&
-                           (pcie_fw & PCIE_FW_MASTER_VLD))
-                               mpfn = PCIE_FW_MASTER_GET(pcie_fw);
+                       if (mpfn == PCIE_FW_MASTER_M &&
+                           (pcie_fw & PCIE_FW_MASTER_VLD_F))
+                               mpfn = PCIE_FW_MASTER_G(pcie_fw);
                        break;
                }
                hw->flags &= ~CSIO_HWF_MASTER;
@@ -1078,7 +1012,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)
 
        if (!fw_rst) {
                /* PIO reset */
-               csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+               csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                mdelay(2000);
                return 0;
        }
@@ -1090,7 +1024,7 @@ csio_do_reset(struct csio_hw *hw, bool fw_rst)
        }
 
        csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                     PIORSTMODE | PIORST, 0, NULL);
+                     PIORSTMODE_F | PIORST_F, 0, NULL);
 
        if (csio_mb_issue(hw, mbp)) {
                csio_err(hw, "Issue of RESET command failed.n");
@@ -1156,7 +1090,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
         * If a legitimate mailbox is provided, issue a RESET command
         * with a HALT indication.
         */
-       if (mbox <= PCIE_FW_MASTER_MASK) {
+       if (mbox <= PCIE_FW_MASTER_M) {
                struct csio_mb  *mbp;
 
                mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
@@ -1166,7 +1100,7 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
                }
 
                csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                             PIORSTMODE | PIORST, FW_RESET_CMD_HALT_F,
+                             PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
                              NULL);
 
                if (csio_mb_issue(hw, mbp)) {
@@ -1193,8 +1127,9 @@ csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
         * rather than a RESET ... if it's new enough to understand that ...
         */
        if (retval == 0 || force) {
-               csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
-               csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
+               csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
+               csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
+                                  PCIE_FW_HALT_F);
        }
 
        /*
@@ -1234,7 +1169,7 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
                 * doing it automatically, we need to clear the PCIE_FW.HALT
                 * bit.
                 */
-               csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);
+               csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);
 
                /*
                 * If we've been given a valid mailbox, first try to get the
@@ -1243,21 +1178,21 @@ csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
                 * valid mailbox or the RESET command failed, fall back to
                 * hitting the chip with a hammer.
                 */
-               if (mbox <= PCIE_FW_MASTER_MASK) {
-                       csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+               if (mbox <= PCIE_FW_MASTER_M) {
+                       csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
                        msleep(100);
                        if (csio_do_reset(hw, true) == 0)
                                return 0;
                }
 
-               csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+               csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                msleep(2000);
        } else {
                int ms;
 
-               csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+               csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
                for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
-                       if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
+                       if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
                                return 0;
                        msleep(100);
                        ms += 100;
@@ -1315,116 +1250,6 @@ csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
        return csio_hw_fw_restart(hw, mbox, reset);
 }
 
-
-/*
- *     csio_hw_fw_config_file - setup an adapter via a Configuration File
- *     @hw: the HW module
- *     @mbox: mailbox to use for the FW command
- *     @mtype: the memory type where the Configuration File is located
- *     @maddr: the memory address where the Configuration File is located
- *     @finiver: return value for CF [fini] version
- *     @finicsum: return value for CF [fini] checksum
- *     @cfcsum: return value for CF computed checksum
- *
- *     Issue a command to get the firmware to process the Configuration
- *     File located at the specified mtype/maddress.  If the Configuration
- *     File is processed successfully and return value pointers are
- *     provided, the Configuration File "[fini] section version and
- *     checksum values will be returned along with the computed checksum.
- *     It's up to the caller to decide how it wants to respond to the
- *     checksums not matching, but it is recommended that a prominent warning
- *     be emitted in order to help people rapidly identify changed or
- *     corrupted Configuration Files.
- *
- *     Also note that it's possible to modify things like "niccaps",
- *     "toecaps",etc. between processing the Configuration File and telling
- *     the firmware to use the new configuration.  Callers which want to
- *     do this will need to "hand-roll" their own CAPS_CONFIGS commands for
- *     Configuration Files if they want to do this.
- */
-static int
-csio_hw_fw_config_file(struct csio_hw *hw,
-                     unsigned int mtype, unsigned int maddr,
-                     uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
-{
-       struct csio_mb  *mbp;
-       struct fw_caps_config_cmd *caps_cmd;
-       int rv = -EINVAL;
-       enum fw_retval ret;
-
-       mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
-       if (!mbp) {
-               CSIO_INC_STATS(hw, n_err_nomem);
-               return -ENOMEM;
-       }
-       /*
-        * Tell the firmware to process the indicated Configuration File.
-        * If there are no errors and the caller has provided return value
-        * pointers for the [fini] section version, checksum and computed
-        * checksum, pass those back to the caller.
-        */
-       caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
-       CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
-       caps_cmd->op_to_write =
-               htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
-                     FW_CMD_REQUEST_F |
-                     FW_CMD_READ_F);
-       caps_cmd->cfvalid_to_len16 =
-               htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
-                     FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
-                     FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
-                     FW_LEN16(*caps_cmd));
-
-       if (csio_mb_issue(hw, mbp)) {
-               csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
-               goto out;
-       }
-
-       ret = csio_mb_fw_retval(mbp);
-       if (ret != FW_SUCCESS) {
-               csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
-               goto out;
-       }
-
-       if (finiver)
-               *finiver = ntohl(caps_cmd->finiver);
-       if (finicsum)
-               *finicsum = ntohl(caps_cmd->finicsum);
-       if (cfcsum)
-               *cfcsum = ntohl(caps_cmd->cfcsum);
-
-       /* Validate device capabilities */
-       if (csio_hw_validate_caps(hw, mbp)) {
-               rv = -ENOENT;
-               goto out;
-       }
-
-       /*
-        * And now tell the firmware to use the configuration we just loaded.
-        */
-       caps_cmd->op_to_write =
-               htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
-                     FW_CMD_REQUEST_F |
-                     FW_CMD_WRITE_F);
-       caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
-
-       if (csio_mb_issue(hw, mbp)) {
-               csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
-               goto out;
-       }
-
-       ret = csio_mb_fw_retval(mbp);
-       if (ret != FW_SUCCESS) {
-               csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
-               goto out;
-       }
-
-       rv = 0;
-out:
-       mempool_free(mbp, hw->mb_mempool);
-       return rv;
-}
-
 /*
  * csio_get_device_params - Get device parameters.
  * @hw: HW module
@@ -1547,7 +1372,8 @@ csio_config_device_caps(struct csio_hw *hw)
        }
 
        /* Validate device capabilities */
-       if (csio_hw_validate_caps(hw, mbp))
+       rv = csio_hw_validate_caps(hw, mbp);
+       if (rv != 0)
                goto out;
 
        /* Don't config device capabilities if already configured */
@@ -1756,9 +1582,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
        uint32_t *cfg_data;
        int value_to_add = 0;
 
-       if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
+       if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) {
                csio_err(hw, "could not find config file %s, err: %d\n",
-                        CSIO_CF_FNAME(hw), ret);
+                        FW_CFG_NAME_T5, ret);
                return -ENOENT;
        }
 
@@ -1798,8 +1624,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
        }
        if (ret == 0) {
                csio_info(hw, "config file upgraded to %s\n",
-                         CSIO_CF_FNAME(hw));
-               snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
+                         FW_CFG_NAME_T5);
+               snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5);
        }
 
 leave:
@@ -1827,11 +1653,13 @@ leave:
 static int
 csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
 {
+       struct csio_mb  *mbp = NULL;
+       struct fw_caps_config_cmd *caps_cmd;
        unsigned int mtype, maddr;
-       int rv;
+       int rv = -EINVAL;
        uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
-       int using_flash;
        char path[64];
+       char *config_name = NULL;
 
        /*
         * Reset device if necessary
@@ -1851,51 +1679,107 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
        rv = csio_hw_flash_config(hw, fw_cfg_param, path);
        spin_lock_irq(&hw->lock);
        if (rv != 0) {
-               if (rv == -ENOENT) {
-                       /*
-                        * config file was not found. Use default
-                        * config file from flash.
-                        */
-                       mtype = FW_MEMTYPE_CF_FLASH;
-                       maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
-                       using_flash = 1;
-               } else {
-                       /*
-                        * we revert back to the hardwired config if
-                        * flashing failed.
-                        */
-                       goto bye;
-               }
+               /*
+                * config file was not found. Use default
+                * config file from flash.
+                */
+               config_name = "On FLASH";
+               mtype = FW_MEMTYPE_CF_FLASH;
+               maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
        } else {
+               config_name = path;
                mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
                maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
-               using_flash = 0;
        }
 
-       hw->cfg_store = (uint8_t)mtype;
+       mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+       if (!mbp) {
+               CSIO_INC_STATS(hw, n_err_nomem);
+               return -ENOMEM;
+       }
+       /*
+        * Tell the firmware to process the indicated Configuration File.
+        * If there are no errors and the caller has provided return value
+        * pointers for the [fini] section version, checksum and computed
+        * checksum, pass those back to the caller.
+        */
+       caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
+       CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+       caps_cmd->op_to_write =
+               htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST_F |
+                     FW_CMD_READ_F);
+       caps_cmd->cfvalid_to_len16 =
+               htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
+                     FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
+                     FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
+                     FW_LEN16(*caps_cmd));
+
+       if (csio_mb_issue(hw, mbp)) {
+               rv = -EINVAL;
+               goto bye;
+       }
+
+       rv = csio_mb_fw_retval(mbp);
+       /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
+        * Configuration File in FLASH), our last gasp effort is to use the
+        * Firmware Configuration File which is embedded in the
+        * firmware.  A very few early versions of the firmware didn't
+        * have one embedded but we can ignore those.
+        */
+       if (rv == ENOENT) {
+               CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+               caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+                                             FW_CMD_REQUEST_F |
+                                             FW_CMD_READ_F);
+               caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
+
+               if (csio_mb_issue(hw, mbp)) {
+                       rv = -EINVAL;
+                       goto bye;
+               }
+
+               rv = csio_mb_fw_retval(mbp);
+               config_name = "Firmware Default";
+       }
+       if (rv != FW_SUCCESS)
+               goto bye;
+
+       finiver = ntohl(caps_cmd->finiver);
+       finicsum = ntohl(caps_cmd->finicsum);
+       cfcsum = ntohl(caps_cmd->cfcsum);
 
        /*
-        * Issue a Capability Configuration command to the firmware to get it
-        * to parse the Configuration File.
+        * And now tell the firmware to use the configuration we just loaded.
         */
-       rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
-               &finicsum, &cfcsum);
-       if (rv != 0)
+       caps_cmd->op_to_write =
+               htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST_F |
+                     FW_CMD_WRITE_F);
+       caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
+
+       if (csio_mb_issue(hw, mbp)) {
+               rv = -EINVAL;
                goto bye;
+       }
 
-       hw->cfg_finiver         = finiver;
-       hw->cfg_finicsum        = finicsum;
-       hw->cfg_cfcsum          = cfcsum;
-       hw->cfg_csum_status     = true;
+       rv = csio_mb_fw_retval(mbp);
+       if (rv != FW_SUCCESS) {
+               csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
+               goto bye;
+       }
 
+       mempool_free(mbp, hw->mb_mempool);
        if (finicsum != cfcsum) {
                csio_warn(hw,
                      "Config File checksum mismatch: csum=%#x, computed=%#x\n",
                      finicsum, cfcsum);
-
-               hw->cfg_csum_status = false;
        }
 
+       /* Validate device capabilities */
+       rv = csio_hw_validate_caps(hw, mbp);
+       if (rv != 0)
+               goto bye;
        /*
         * Note that we're operating with parameters
         * not supplied by the driver, rather than from hard-wired
@@ -1918,56 +1802,184 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
        /* Post event to notify completion of configuration */
        csio_post_event(&hw->sm, CSIO_HWE_INIT);
 
-       csio_info(hw,
-        "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
-                 (using_flash ? "in device FLASH" : path), finiver, cfcsum);
-
+       csio_info(hw, "Successfully configure using Firmware "
+                 "Configuration File %s, version %#x, computed checksum %#x\n",
+                 config_name, finiver, cfcsum);
        return 0;
 
        /*
         * Something bad happened.  Return the error ...
         */
 bye:
+       if (mbp)
+               mempool_free(mbp, hw->mb_mempool);
        hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
-       csio_dbg(hw, "Configuration file error %d\n", rv);
+       csio_warn(hw, "Configuration file error %d\n", rv);
        return rv;
 }
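
/* Editorial sketch (not part of the patch): the Configuration File
 * negotiation that csio_hw_use_fwconfig() now performs inline -- a READ
 * FW_CAPS_CONFIG_CMD aimed at the config file, a retry without CFVALID so
 * the firmware falls back to its embedded default on ENOENT, then a WRITE
 * to commit the capabilities.  caps_read()/caps_write() are hypothetical
 * wrappers around the mailbox sequences shown above.
 */
extern int caps_read(struct csio_hw *hw, unsigned int mtype,
		     unsigned int maddr);       /* READ + CFVALID + location */
extern int caps_write(struct csio_hw *hw);      /* WRITE: commit the caps */

static int negotiate_config(struct csio_hw *hw, unsigned int mtype,
			    unsigned int maddr, const char **name)
{
	int rv = caps_read(hw, mtype, maddr);

	if (rv == ENOENT) {                     /* no file: embedded default */
		rv = caps_read(hw, 0, 0);
		*name = "Firmware Default";
	}
	if (rv != FW_SUCCESS)
		return -EINVAL;
	return caps_write(hw);                  /* tell FW to use this config */
}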
 
-/*
- * Attempt to initialize the adapter via hard-coded, driver supplied
- * parameters ...
+/* Is the given firmware API compatible with the one the driver was compiled
+ * with?
  */
-static int
-csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
+static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
 {
-       int             rv;
-       /*
-        * Reset device if necessary
-        */
-       if (reset) {
-               rv = csio_do_reset(hw, true);
-               if (rv != 0)
-                       goto out;
+
+       /* short circuit if it's the exact same firmware version */
+       if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
+               return 1;
+
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+       if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+           SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
+               return 1;
+#undef SAME_INTF
+
+       return 0;
+}
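
/* Editorial note (not part of the patch): fw_compatible() above accepts two
 * cases -- an exact chip/version match, or matching per-interface API
 * versions (nic/vnic/ri/iscsi/fcoe) -- so a newer build whose interface
 * versions are unchanged still counts as usable, e.g.:
 *
 *	card_fw_usable = fw_compatible(&fw_info->fw_hdr, card_fw);
 */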
+
+/* The firmware in the filesystem is usable, but should it be installed?
+ * This routine explains itself in detail if it indicates the filesystem
+ * firmware should be installed.
+ */
+static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
+                               int k, int c)
+{
+       const char *reason;
+
+       if (!card_fw_usable) {
+               reason = "incompatible or unusable";
+               goto install;
        }
 
-       /* Get and set device capabilities */
-       rv = csio_config_device_caps(hw);
-       if (rv != 0)
-               goto out;
+       if (k > c) {
+               reason = "older than the version supported with this driver";
+               goto install;
+       }
 
-       /* device parameters */
-       rv = csio_get_device_params(hw);
-       if (rv != 0)
-               goto out;
+       return 0;
 
-       /* Configure SGE */
-       csio_wr_sge_init(hw);
+install:
+       csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
+               "installing firmware %u.%u.%u.%u on card.\n",
+               FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+               FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
+               FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+               FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
 
-       /* Post event to notify completion of configuration */
-       csio_post_event(&hw->sm, CSIO_HWE_INIT);
+       return 1;
+}
 
-out:
-       return rv;
+static struct fw_info fw_info_array[] = {
+       {
+               .chip = CHELSIO_T5,
+               .fs_name = FW_CFG_NAME_T5,
+               .fw_mod_name = FW_FNAME_T5,
+               .fw_hdr = {
+                       .chip = FW_HDR_CHIP_T5,
+                       .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
+                       .intfver_nic = FW_INTFVER(T5, NIC),
+                       .intfver_vnic = FW_INTFVER(T5, VNIC),
+                       .intfver_ri = FW_INTFVER(T5, RI),
+                       .intfver_iscsi = FW_INTFVER(T5, ISCSI),
+                       .intfver_fcoe = FW_INTFVER(T5, FCOE),
+               },
+       }
+};
+
+static struct fw_info *find_fw_info(int chip)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
+               if (fw_info_array[i].chip == chip)
+                       return &fw_info_array[i];
+       }
+       return NULL;
+}
+
+static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
+              const u8 *fw_data, unsigned int fw_size,
+              struct fw_hdr *card_fw, enum csio_dev_state state,
+              int *reset)
+{
+       int ret, card_fw_usable, fs_fw_usable;
+       const struct fw_hdr *fs_fw;
+       const struct fw_hdr *drv_fw;
+
+       drv_fw = &fw_info->fw_hdr;
+
+       /* Read the header of the firmware on the card */
+       ret = csio_hw_read_flash(hw, FLASH_FW_START,
+                           sizeof(*card_fw) / sizeof(uint32_t),
+                           (uint32_t *)card_fw, 1);
+       if (ret == 0) {
+               card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
+       } else {
+               csio_err(hw,
+                       "Unable to read card's firmware header: %d\n", ret);
+               card_fw_usable = 0;
+       }
+
+       if (fw_data != NULL) {
+               fs_fw = (const void *)fw_data;
+               fs_fw_usable = fw_compatible(drv_fw, fs_fw);
+       } else {
+               fs_fw = NULL;
+               fs_fw_usable = 0;
+       }
+
+       if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+           (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
+               /* Common case: the firmware on the card is an exact match and
+                * the filesystem one is an exact match too, or the filesystem
+                * one is absent/incompatible.
+                */
+       } else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
+                  csio_should_install_fs_fw(hw, card_fw_usable,
+                                       be32_to_cpu(fs_fw->fw_ver),
+                                       be32_to_cpu(card_fw->fw_ver))) {
+               ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
+                                    fw_size, 0);
+               if (ret != 0) {
+                       csio_err(hw,
+                               "failed to install firmware: %d\n", ret);
+                       goto bye;
+               }
+
+               /* Installed successfully, update the cached header too. */
+               memcpy(card_fw, fs_fw, sizeof(*card_fw));
+               card_fw_usable = 1;
+               *reset = 0;     /* already reset as part of load_fw */
+       }
+
+       if (!card_fw_usable) {
+               uint32_t d, c, k;
+
+               d = be32_to_cpu(drv_fw->fw_ver);
+               c = be32_to_cpu(card_fw->fw_ver);
+               k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
+
+               csio_err(hw, "Cannot find a usable firmware: "
+                       "chip state %d, "
+                       "driver compiled with %d.%d.%d.%d, "
+                       "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
+                       state,
+                       FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
+                       FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
+                       FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+                       FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+                       FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+                       FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+               ret = -EINVAL;
+               goto bye;
+       }
+
+       /* We're using whatever's on the card and it's known to be good. */
+       hw->fwrev = be32_to_cpu(card_fw->fw_ver);
+       hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
+
+bye:
+       return ret;
 }
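
/* Editorial note (not part of the patch): csio_hw_prep_fw() above reduces to
 * this decision order -- (1) keep the flashed image when it exactly matches
 * the driver and no better filesystem image exists, (2) otherwise flash the
 * filesystem image if the device is still uninitialised and
 * csio_should_install_fs_fw() agrees, (3) otherwise fail with a full
 * driver/card/filesystem version report.
 */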
 
 /*
@@ -1977,48 +1989,52 @@ out:
  * latest firmware ECANCELED is returned
  */
 static int
-csio_hw_flash_fw(struct csio_hw *hw)
+csio_hw_flash_fw(struct csio_hw *hw, int *reset)
 {
        int ret = -ECANCELED;
        const struct firmware *fw;
-       const struct fw_hdr *hdr;
-       u32 fw_ver;
+       struct fw_info *fw_info;
+       struct fw_hdr *card_fw;
        struct pci_dev *pci_dev = hw->pdev;
        struct device *dev = &pci_dev->dev ;
+       const u8 *fw_data = NULL;
+       unsigned int fw_size = 0;
 
-       if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
-               csio_err(hw, "could not find firmware image %s, err: %d\n",
-                        CSIO_FW_FNAME(hw), ret);
+       /* This is the firmware whose headers the driver was compiled
+        * against
+        */
+       fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
+       if (fw_info == NULL) {
+               csio_err(hw,
+                       "unable to get firmware info for chip %d.\n",
+                       CHELSIO_CHIP_VERSION(hw->chip_id));
                return -EINVAL;
        }
 
-       hdr = (const struct fw_hdr *)fw->data;
-       fw_ver = ntohl(hdr->fw_ver);
-       if (FW_HDR_FW_VER_MAJOR_G(fw_ver) != FW_VERSION_MAJOR(hw))
-               return -EINVAL;      /* wrong major version, won't do */
+       if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) {
+               csio_err(hw, "could not find firmware image %s, err: %d\n",
+                        FW_FNAME_T5, ret);
+       } else {
+               fw_data = fw->data;
+               fw_size = fw->size;
+       }
 
-       /*
-        * If the flash FW is unusable or we found something newer, load it.
+       /* allocate memory to read the header of the firmware on the
+        * card
         */
-       if (FW_HDR_FW_VER_MAJOR_G(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
-           fw_ver > hw->fwrev) {
-               ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
-                                   /*force=*/false);
-               if (!ret)
-                       csio_info(hw,
-                                 "firmware upgraded to version %pI4 from %s\n",
-                                 &hdr->fw_ver, CSIO_FW_FNAME(hw));
-               else
-                       csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
-       } else
-               ret = -EINVAL;
+       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
+       if (!card_fw) {
+               if (fw != NULL)
+                       release_firmware(fw);
+               return -ENOMEM;
+       }
 
-       release_firmware(fw);
+       /* upgrade FW logic */
+       ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
+                        hw->fw_state, reset);
 
+       /* Cleaning up */
+       if (fw != NULL)
+               release_firmware(fw);
+       kfree(card_fw);
        return ret;
 }
 
-
 /*
  * csio_hw_configure - Configure HW
  * @hw - HW module
@@ -2039,7 +2055,7 @@ csio_hw_configure(struct csio_hw *hw)
        }
 
        /* HW version */
-       hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);
+       hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);
 
        /* Needed for FW download */
        rv = csio_hw_get_flash_params(hw);
@@ -2074,50 +2090,43 @@ csio_hw_configure(struct csio_hw *hw)
        if (rv != 0)
                goto out;
 
+       csio_hw_get_fw_version(hw, &hw->fwrev);
+       csio_hw_get_tp_version(hw, &hw->tp_vers);
        if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
-               rv = csio_hw_check_fw_version(hw);
-               if (rv == -EINVAL) {
 
                        /* Do firmware update */
-                       spin_unlock_irq(&hw->lock);
-                       rv = csio_hw_flash_fw(hw);
-                       spin_lock_irq(&hw->lock);
+               spin_unlock_irq(&hw->lock);
+               rv = csio_hw_flash_fw(hw, &reset);
+               spin_lock_irq(&hw->lock);
 
-                       if (rv == 0) {
-                               reset = 0;
-                               /*
-                                * Note that the chip was reset as part of the
-                                * firmware upgrade so we don't reset it again
-                                * below and grab the new firmware version.
-                                */
-                               rv = csio_hw_check_fw_version(hw);
-                       }
-               }
-               /*
-                * If the firmware doesn't support Configuration
-                * Files, use the old Driver-based, hard-wired
-                * initialization.  Otherwise, try using the
-                * Configuration File support and fall back to the
-                * Driver-based initialization if there's no
-                * Configuration File found.
+               if (rv != 0)
+                       goto out;
+
+               /* If the firmware doesn't support Configuration Files,
+                * return an error.
                 */
-               if (csio_hw_check_fwconfig(hw, param) == 0) {
-                       rv = csio_hw_use_fwconfig(hw, reset, param);
-                       if (rv == -ENOENT)
-                               goto out;
-                       if (rv != 0) {
-                               csio_info(hw,
-                                   "No Configuration File present "
-                                   "on adapter.  Using hard-wired "
-                                   "configuration parameters.\n");
-                               rv = csio_hw_no_fwconfig(hw, reset);
-                       }
-               } else {
-                       rv = csio_hw_no_fwconfig(hw, reset);
+               rv = csio_hw_check_fwconfig(hw, param);
+               if (rv != 0) {
+                       csio_info(hw, "Firmware doesn't support "
+                                 "Firmware Configuration files\n");
+                       goto out;
                }
 
-               if (rv != 0)
+               /* The firmware provides us with a memory buffer where we can
+                * load a Configuration File from the host if we want to
+                * override the Configuration File in flash.
+                */
+               rv = csio_hw_use_fwconfig(hw, reset, param);
+               if (rv == -ENOENT) {
+                       csio_info(hw, "Could not initialize "
+                                 "adapter, error %d\n", rv);
+                       goto out;
+               }
+               if (rv != 0) {
+                       csio_info(hw, "Could not initialize "
+                                 "adapter, error %d\n", rv);
                        goto out;
+               }
 
        } else {
                if (hw->fw_state == CSIO_DEV_STATE_INIT) {
@@ -2217,7 +2226,7 @@ out:
        return;
 }
 
-#define PF_INTR_MASK (PFSW | PFCIM)
+#define PF_INTR_MASK (PFSW_F | PFCIM_F)
 
 /*
  * csio_hw_intr_enable - Enable HW interrupts
@@ -2229,21 +2238,21 @@ static void
 csio_hw_intr_enable(struct csio_hw *hw)
 {
        uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
-       uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
-       uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);
+       uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+       uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
 
        /*
         * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
         * by FW, so do nothing for INTX.
         */
        if (hw->intr_mode == CSIO_IM_MSIX)
-               csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
-                                  AIVEC(AIVEC_MASK), vec);
+               csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+                                  AIVEC_V(AIVEC_M), vec);
        else if (hw->intr_mode == CSIO_IM_MSI)
-               csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
-                                  AIVEC(AIVEC_MASK), 0);
+               csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+                                  AIVEC_V(AIVEC_M), 0);
 
-       csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));
+       csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
 
        /* Turn on MB interrupts - this will internally flush PIO as well */
        csio_mb_intr_enable(hw);
@@ -2253,19 +2262,19 @@ csio_hw_intr_enable(struct csio_hw *hw)
                /*
                 * Disable the Serial FLASH interrupt, if enabled!
                 */
-               pl &= (~SF);
-               csio_wr_reg32(hw, pl, PL_INT_ENABLE);
+               pl &= (~SF_F);
+               csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);
 
-               csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
-                             EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
-                             ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
-                             ERR_DATA_CPL_ON_HIGH_QID1 |
-                             ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
-                             ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
-                             ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
-                             ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
-                             SGE_INT_ENABLE3);
-               csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
+               csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
+                             EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
+                             ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
+                             ERR_DATA_CPL_ON_HIGH_QID1_F |
+                             ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
+                             ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
+                             ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
+                             ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
+                             SGE_INT_ENABLE3_A);
+               csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
        }
 
        hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
@@ -2281,16 +2290,16 @@ csio_hw_intr_enable(struct csio_hw *hw)
 void
 csio_hw_intr_disable(struct csio_hw *hw)
 {
-       uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+       uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
 
        if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
                return;
 
        hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
 
-       csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
+       csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
        if (csio_is_hw_master(hw))
-               csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
+               csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);
 
        /* Turn off MB interrupts */
        csio_mb_intr_disable(hw);
@@ -2300,7 +2309,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
 void
 csio_hw_fatal_err(struct csio_hw *hw)
 {
-       csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
+       csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
        csio_hw_intr_disable(hw);
 
        /* Do not reset HW, we may need FW state for debugging */
@@ -2594,7 +2603,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
                 * register directly.
                 */
                csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
-               csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+               csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
                mdelay(2000);
                break;
 
@@ -2682,11 +2691,11 @@ static void csio_tp_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info tp_intr_info[] = {
                { 0x3fffffff, "TP parity error", -1, 1 },
-               { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+               { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
+       if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
                csio_hw_fatal_err(hw);
 }
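
/* Editorial sketch (not part of the patch): how csio_handle_intr_status()
 * presumably consumes the { mask, msg, stat_idx, fatal } tables used by the
 * handlers in this file.  The body is modelled on cxgb4's
 * t4_handle_intr_status(), not on the csiostor source itself.
 */
static int handle_intr_status_sketch(struct csio_hw *hw, unsigned int reg,
				     const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;               /* cause bit not raised */
		fatal |= acts->fatal;           /* remember fatal causes */
		if (acts->msg)
			csio_err(hw, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
	}
	if (status)
		csio_wr_reg32(hw, status, reg); /* ack the causes we saw */
	return fatal;                           /* caller escalates if set */
}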
 
@@ -2698,52 +2707,52 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
        uint64_t v;
 
        static struct intr_info sge_intr_info[] = {
-               { ERR_CPL_EXCEED_IQE_SIZE,
+               { ERR_CPL_EXCEED_IQE_SIZE_F,
                  "SGE received CPL exceeding IQE size", -1, 1 },
-               { ERR_INVALID_CIDX_INC,
+               { ERR_INVALID_CIDX_INC_F,
                  "SGE GTS CIDX increment too large", -1, 0 },
-               { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
-               { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
-               { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+               { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+               { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
+               { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
-               { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
                  0 },
-               { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+               { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
                  0 },
-               { ERR_ING_CTXT_PRIO,
+               { ERR_ING_CTXT_PRIO_F,
                  "SGE too many priority ingress contexts", -1, 0 },
-               { ERR_EGR_CTXT_PRIO,
+               { ERR_EGR_CTXT_PRIO_F,
                  "SGE too many priority egress contexts", -1, 0 },
-               { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
-               { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+               { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+               { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
                { 0, NULL, 0, 0 }
        };
 
-       v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
-           ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
+       v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
+           ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
        if (v) {
                csio_fatal(hw, "SGE parity error (%#llx)\n",
                            (unsigned long long)v);
                csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
-                                               SGE_INT_CAUSE1);
-               csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
+                                               SGE_INT_CAUSE1_A);
+               csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
        }
 
-       v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
+       v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
 
-       if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) ||
+       if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
            v != 0)
                csio_hw_fatal_err(hw);
 }
 
-#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
-                     OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
-#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
-                     IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+                     OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+                     IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
 
 /*
  * CIM interrupt handler.
@@ -2751,53 +2760,53 @@ static void csio_sge_intr_handler(struct csio_hw *hw)
 static void csio_cim_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info cim_intr_info[] = {
-               { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+               { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
                { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
                { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
-               { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
-               { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
-               { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
-               { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+               { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
+               { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
+               { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
+               { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info cim_upintr_info[] = {
-               { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
-               { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
-               { ILLWRINT, "CIM illegal write", -1, 1 },
-               { ILLRDINT, "CIM illegal read", -1, 1 },
-               { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
-               { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
-               { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
-               { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
-               { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
-               { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
-               { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
-               { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
-               { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
-               { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
-               { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
-               { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
-               { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
-               { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
-               { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
-               { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
-               { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
-               { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
-               { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
-               { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
-               { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
-               { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
-               { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
-               { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+               { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
+               { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
+               { ILLWRINT_F, "CIM illegal write", -1, 1 },
+               { ILLRDINT_F, "CIM illegal read", -1, 1 },
+               { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
+               { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
+               { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
+               { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
+               { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
+               { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
+               { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
+               { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
+               { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
+               { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
+               { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
+               { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
+               { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
+               { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
+               { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
+               { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
+               { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
+               { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
+               { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
+               { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
+               { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
+               { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
+               { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
+               { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
        int fat;
 
-       fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
-                                   cim_intr_info) +
-             csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
-                                   cim_upintr_info);
+       fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
+                                     cim_intr_info) +
+             csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
+                                     cim_upintr_info);
        if (fat)
                csio_hw_fatal_err(hw);
 }
@@ -2813,7 +2822,7 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
+       if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2823,19 +2832,19 @@ static void csio_ulprx_intr_handler(struct csio_hw *hw)
 static void csio_ulptx_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info ulptx_intr_info[] = {
-               { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
                  0 },
-               { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+               { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
                  0 },
                { 0xfffffff, "ULPTX parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
+       if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2845,20 +2854,20 @@ static void csio_ulptx_intr_handler(struct csio_hw *hw)
 static void csio_pmtx_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info pmtx_intr_info[] = {
-               { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
-               { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
-               { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
-               { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+               { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
+               { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
+               { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
+               { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
                { 0xffffff0, "PMTX framing error", -1, 1 },
-               { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
-               { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+               { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
+               { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
                  1 },
-               { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
-               { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+               { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
+               { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
+       if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2868,17 +2877,17 @@ static void csio_pmtx_intr_handler(struct csio_hw *hw)
 static void csio_pmrx_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info pmrx_intr_info[] = {
-               { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+               { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
                { 0x3ffff0, "PMRX framing error", -1, 1 },
-               { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
-               { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+               { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
+               { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
                  1 },
-               { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
-               { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+               { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
+               { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
+       if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2888,16 +2897,16 @@ static void csio_pmrx_intr_handler(struct csio_hw *hw)
 static void csio_cplsw_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info cplsw_intr_info[] = {
-               { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
-               { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
-               { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
-               { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
-               { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
-               { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+               { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
+               { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
+               { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
+               { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
+               { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
+               { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
+       if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2907,15 +2916,15 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw)
 static void csio_le_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info le_intr_info[] = {
-               { LIPMISS, "LE LIP miss", -1, 0 },
-               { LIP0, "LE 0 LIP error", -1, 0 },
-               { PARITYERR, "LE parity error", -1, 1 },
-               { UNKNOWNCMD, "LE unknown command", -1, 1 },
-               { REQQPARERR, "LE request queue parity error", -1, 1 },
+               { LIPMISS_F, "LE LIP miss", -1, 0 },
+               { LIP0_F, "LE 0 LIP error", -1, 0 },
+               { PARITYERR_F, "LE parity error", -1, 1 },
+               { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+               { REQQPARERR_F, "LE request queue parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
+       if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -2929,19 +2938,22 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_tx_intr_info[] = {
-               { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
-               { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
-               { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
-               { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
-               { BUBBLE, "MPS Tx underflow", -1, 1 },
-               { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
-               { FRMERR, "MPS Tx framing error", -1, 1 },
+               { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
+               { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+               { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
+                 -1, 1 },
+               { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
+                 -1, 1 },
+               { BUBBLE_F, "MPS Tx underflow", -1, 1 },
+               { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
+               { FRMERR_F, "MPS Tx framing error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_trc_intr_info[] = {
-               { FILTMEM, "MPS TRC filter parity error", -1, 1 },
-               { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
-               { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+               { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
+               { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
+                 -1, 1 },
+               { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_stat_sram_intr_info[] = {
@@ -2957,36 +2969,37 @@ static void csio_mps_intr_handler(struct csio_hw *hw)
                { 0, NULL, 0, 0 }
        };
        static struct intr_info mps_cls_intr_info[] = {
-               { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
-               { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
-               { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+               { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
+               { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
+               { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
        int fat;
 
-       fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
-                                   mps_rx_intr_info) +
-             csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
-                                   mps_tx_intr_info) +
-             csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
-                                   mps_trc_intr_info) +
-             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
-                                   mps_stat_sram_intr_info) +
-             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
-                                   mps_stat_tx_intr_info) +
-             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
-                                   mps_stat_rx_intr_info) +
-             csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
-                                   mps_cls_intr_info);
-
-       csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
-       csio_rd_reg32(hw, MPS_INT_CAUSE);                    /* flush */
+       fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
+                                     mps_rx_intr_info) +
+             csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
+                                     mps_tx_intr_info) +
+             csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
+                                     mps_trc_intr_info) +
+             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
+                                     mps_stat_sram_intr_info) +
+             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
+                                     mps_stat_tx_intr_info) +
+             csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
+                                     mps_stat_rx_intr_info) +
+             csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
+                                     mps_cls_intr_info);
+
+       csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
+       csio_rd_reg32(hw, MPS_INT_CAUSE_A);                    /* flush */
        if (fat)
                csio_hw_fatal_err(hw);
 }
 
-#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
+                     ECC_UE_INT_CAUSE_F)
 
 /*
  * EDC/MC interrupt handler.
@@ -2998,28 +3011,28 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
        unsigned int addr, cnt_addr, v;
 
        if (idx <= MEM_EDC1) {
-               addr = EDC_REG(EDC_INT_CAUSE, idx);
-               cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+               addr = EDC_REG(EDC_INT_CAUSE_A, idx);
+               cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
        } else {
-               addr = MC_INT_CAUSE;
-               cnt_addr = MC_ECC_STATUS;
+               addr = MC_INT_CAUSE_A;
+               cnt_addr = MC_ECC_STATUS_A;
        }
 
        v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
-       if (v & PERR_INT_CAUSE)
+       if (v & PERR_INT_CAUSE_F)
                csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
-       if (v & ECC_CE_INT_CAUSE) {
-               uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));
+       if (v & ECC_CE_INT_CAUSE_F) {
+               uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
 
-               csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
+               csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
                csio_warn(hw, "%u %s correctable ECC data error%s\n",
                            cnt, name[idx], cnt > 1 ? "s" : "");
        }
-       if (v & ECC_UE_INT_CAUSE)
+       if (v & ECC_UE_INT_CAUSE_F)
                csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
 
        csio_wr_reg32(hw, v, addr);
-       if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+       if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
                csio_hw_fatal_err(hw);
 }
 
@@ -3028,18 +3041,18 @@ static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
  */
 static void csio_ma_intr_handler(struct csio_hw *hw)
 {
-       uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);
+       uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);
 
-       if (status & MEM_PERR_INT_CAUSE)
+       if (status & MEM_PERR_INT_CAUSE_F)
                csio_fatal(hw, "MA parity error, parity status %#x\n",
-                           csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
-       if (status & MEM_WRAP_INT_CAUSE) {
-               v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
+                           csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
+       if (status & MEM_WRAP_INT_CAUSE_F) {
+               v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
                csio_fatal(hw,
                   "MA address wrap-around error by client %u to address %#x\n",
-                  MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
+                  MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
        }
-       csio_wr_reg32(hw, status, MA_INT_CAUSE);
+       csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
        csio_hw_fatal_err(hw);
 }
 
@@ -3049,13 +3062,13 @@ static void csio_ma_intr_handler(struct csio_hw *hw)
 static void csio_smb_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info smb_intr_info[] = {
-               { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
-               { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
-               { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+               { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
+               { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
+               { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
+       if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -3065,14 +3078,14 @@ static void csio_smb_intr_handler(struct csio_hw *hw)
 static void csio_ncsi_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info ncsi_intr_info[] = {
-               { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
-               { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
-               { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
-               { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+               { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
+               { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
+               { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
+               { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
+       if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -3081,17 +3094,17 @@ static void csio_ncsi_intr_handler(struct csio_hw *hw)
  */
 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
 {
-       uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
+       uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
 
-       v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+       v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
        if (!v)
                return;
 
-       if (v & TXFIFO_PRTY_ERR)
+       if (v & TXFIFO_PRTY_ERR_F)
                csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
-       if (v & RXFIFO_PRTY_ERR)
+       if (v & RXFIFO_PRTY_ERR_F)
                csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
-       csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
+       csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
        csio_hw_fatal_err(hw);
 }
 
@@ -3101,12 +3114,12 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
 static void csio_pl_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info pl_intr_info[] = {
-               { FATALPERR, "T4 fatal parity error", -1, 1 },
-               { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+               { FATALPERR_F, "T4 fatal parity error", -1, 1 },
+               { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
-       if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
+       if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
                csio_hw_fatal_err(hw);
 }
 
@@ -3121,7 +3134,7 @@ static void csio_pl_intr_handler(struct csio_hw *hw)
 int
 csio_hw_slow_intr_handler(struct csio_hw *hw)
 {
-       uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);
+       uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);
 
        if (!(cause & CSIO_GLBL_INTR_MASK)) {
                CSIO_INC_STATS(hw, n_plint_unexp);
@@ -3132,75 +3145,75 @@ csio_hw_slow_intr_handler(struct csio_hw *hw)
 
        CSIO_INC_STATS(hw, n_plint_cnt);
 
-       if (cause & CIM)
+       if (cause & CIM_F)
                csio_cim_intr_handler(hw);
 
-       if (cause & MPS)
+       if (cause & MPS_F)
                csio_mps_intr_handler(hw);
 
-       if (cause & NCSI)
+       if (cause & NCSI_F)
                csio_ncsi_intr_handler(hw);
 
-       if (cause & PL)
+       if (cause & PL_F)
                csio_pl_intr_handler(hw);
 
-       if (cause & SMB)
+       if (cause & SMB_F)
                csio_smb_intr_handler(hw);
 
-       if (cause & XGMAC0)
+       if (cause & XGMAC0_F)
                csio_xgmac_intr_handler(hw, 0);
 
-       if (cause & XGMAC1)
+       if (cause & XGMAC1_F)
                csio_xgmac_intr_handler(hw, 1);
 
-       if (cause & XGMAC_KR0)
+       if (cause & XGMAC_KR0_F)
                csio_xgmac_intr_handler(hw, 2);
 
-       if (cause & XGMAC_KR1)
+       if (cause & XGMAC_KR1_F)
                csio_xgmac_intr_handler(hw, 3);
 
-       if (cause & PCIE)
+       if (cause & PCIE_F)
                hw->chip_ops->chip_pcie_intr_handler(hw);
 
-       if (cause & MC)
+       if (cause & MC_F)
                csio_mem_intr_handler(hw, MEM_MC);
 
-       if (cause & EDC0)
+       if (cause & EDC0_F)
                csio_mem_intr_handler(hw, MEM_EDC0);
 
-       if (cause & EDC1)
+       if (cause & EDC1_F)
                csio_mem_intr_handler(hw, MEM_EDC1);
 
-       if (cause & LE)
+       if (cause & LE_F)
                csio_le_intr_handler(hw);
 
-       if (cause & TP)
+       if (cause & TP_F)
                csio_tp_intr_handler(hw);
 
-       if (cause & MA)
+       if (cause & MA_F)
                csio_ma_intr_handler(hw);
 
-       if (cause & PM_TX)
+       if (cause & PM_TX_F)
                csio_pmtx_intr_handler(hw);
 
-       if (cause & PM_RX)
+       if (cause & PM_RX_F)
                csio_pmrx_intr_handler(hw);
 
-       if (cause & ULP_RX)
+       if (cause & ULP_RX_F)
                csio_ulprx_intr_handler(hw);
 
-       if (cause & CPL_SWITCH)
+       if (cause & CPL_SWITCH_F)
                csio_cplsw_intr_handler(hw);
 
-       if (cause & SGE)
+       if (cause & SGE_F)
                csio_sge_intr_handler(hw);
 
-       if (cause & ULP_TX)
+       if (cause & ULP_TX_F)
                csio_ulptx_intr_handler(hw);
 
        /* Clear the interrupts just processed for which we are the master. */
-       csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
-       csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */
+       csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
+       csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */
 
        return 1;
 }
@@ -3840,13 +3853,7 @@ csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
                prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
                adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
 
-               if (prot_type == CSIO_T4_FCOE_ASIC) {
-                       memcpy(hw->hw_ver,
-                              csio_t4_fcoe_adapters[adap_type].model_no, 16);
-                       memcpy(hw->model_desc,
-                              csio_t4_fcoe_adapters[adap_type].description,
-                              32);
-               } else if (prot_type == CSIO_T5_FCOE_ASIC) {
+               if (prot_type == CSIO_T5_FCOE_ASIC) {
                        memcpy(hw->hw_ver,
                               csio_t5_fcoe_adapters[adap_type].model_no, 16);
                        memcpy(hw->model_desc,
@@ -3883,8 +3890,8 @@ csio_hw_init(struct csio_hw *hw)
 
        strcpy(hw->name, CSIO_HW_NAME);
 
-       /* Initialize the HW chip ops with T4/T5 specific ops */
-       hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
+       /* Initialize the HW chip ops T5 specific ops */
+       hw->chip_ops = &t5_ops;
 
        /* Set the model & its description */
 
index 68248da1b9afcaf7f0cdb7ca5c249070c6c7cb3e..029bef82c0576ebe66d14f13b705b63197fedd76 100644 (file)
@@ -48,6 +48,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "t4_hw.h"
 #include "csio_hw_chip.h"
 #include "csio_wr.h"
 #include "csio_mb.h"
@@ -117,10 +118,10 @@ extern int csio_msi;
 #define CSIO_ASIC_DEVID_PROTO_MASK             0xFF00
 #define CSIO_ASIC_DEVID_TYPE_MASK              0x00FF
 
-#define CSIO_GLBL_INTR_MASK            (CIM | MPS | PL | PCIE | MC | EDC0 | \
-                                        EDC1 | LE | TP | MA | PM_TX | PM_RX | \
-                                        ULP_RX | CPL_SWITCH | SGE | \
-                                        ULP_TX | SF)
+#define CSIO_GLBL_INTR_MASK    (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \
+                                EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \
+                                PM_TX_F | PM_RX_F | ULP_RX_F | \
+                                CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
 
 /*
  * Hard parameters used to initialize the card in the absence of a
@@ -174,16 +175,12 @@ struct csio_evt_msg {
 };
 
 enum {
-       EEPROMVSIZE    = 32768, /* Serial EEPROM virtual address space size */
        SERNUM_LEN     = 16,    /* Serial # length */
        EC_LEN         = 16,    /* E/C length */
        ID_LEN         = 16,    /* ID length */
-       TRACE_LEN      = 112,   /* length of trace data and mask */
 };
 
 enum {
-       SF_PAGE_SIZE = 256,           /* serial flash page size */
-       SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
        SF_SIZE = SF_SEC_SIZE * 16,   /* serial flash size */
 };
 
@@ -199,39 +196,8 @@ enum {
        SF_RD_DATA_FAST = 0xb,        /* read flash */
        SF_RD_ID        = 0x9f,       /* read ID */
        SF_ERASE_SECTOR = 0xd8,       /* erase sector */
-
-       FW_START_SEC = 8,             /* first flash sector for FW */
-       FW_END_SEC = 15,              /* last flash sector for FW */
-       FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
-       FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
-
-       FLASH_CFG_MAX_SIZE    = 0x10000 , /* max size of the flash config file*/
-       FLASH_CFG_OFFSET      = 0x1f0000,
-       FLASH_CFG_START_SEC   = FLASH_CFG_OFFSET / SF_SEC_SIZE,
 };
 
-/*
- * Flash layout.
- */
-#define FLASH_START(start)     ((start) * SF_SEC_SIZE)
-#define FLASH_MAX_SIZE(nsecs)  ((nsecs) * SF_SEC_SIZE)
-
-enum {
-       /*
-        * Location of firmware image in FLASH.
-        */
-       FLASH_FW_START_SEC = 8,
-       FLASH_FW_NSECS = 8,
-       FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
-       FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
-
-       /* Location of Firmware Configuration File in FLASH. */
-       FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
-};
-
-#undef FLASH_START
-#undef FLASH_MAX_SIZE
-
 /* Management module */
 enum {
        CSIO_MGMT_EQ_WRSIZE = 512,
@@ -482,11 +448,6 @@ struct csio_hw {
        uint32_t                tp_vers;
        char                    chip_ver;
        uint16_t                chip_id;                /* Tells T4/T5 chip */
-       uint32_t                cfg_finiver;
-       uint32_t                cfg_finicsum;
-       uint32_t                cfg_cfcsum;
-       uint8_t                 cfg_csum_status;
-       uint8_t                 cfg_store;
        enum csio_dev_state     fw_state;
        struct csio_vpd         vpd;
 
index 4752fed476dfb293e04d5860e1fad2f6116f9327..b56a11d817be32318976b15111dfcee8b9e58bcb 100644 (file)
 #include "csio_defs.h"
 
 /* Define MACRO values */
-#define CSIO_HW_T4                             0x4000
-#define CSIO_T4_FCOE_ASIC                      0x4600
 #define CSIO_HW_T5                             0x5000
 #define CSIO_T5_FCOE_ASIC                      0x5600
 #define CSIO_HW_CHIP_MASK                      0xF000
 
-#define T4_REGMAP_SIZE                         (160 * 1024)
 #define T5_REGMAP_SIZE                         (332 * 1024)
-#define FW_FNAME_T4                            "cxgb4/t4fw.bin"
 #define FW_FNAME_T5                            "cxgb4/t5fw.bin"
-#define FW_CFG_NAME_T4                         "cxgb4/t4-config.txt"
 #define FW_CFG_NAME_T5                         "cxgb4/t5-config.txt"
 
-/* Define static functions */
-static inline int csio_is_t4(uint16_t chip)
-{
-       return (chip == CSIO_HW_T4);
-}
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_FPGA          0x100
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T5             0x5
+
+enum chip_type {
+       T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+       T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+       T5_FIRST_REV    = T5_A0,
+       T5_LAST_REV     = T5_A1,
+};
 
 static inline int csio_is_t5(uint16_t chip)
 {
@@ -65,30 +68,22 @@ static inline int csio_is_t5(uint16_t chip)
 #define CSIO_DEVICE(devid, idx)                                                \
        { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
 
-#define CSIO_HW_PIDX(hw, index)                                                \
-       (csio_is_t4(hw->chip_id) ? (PIDX(index)) :                      \
-                                       (PIDX_T5(index) | DBTYPE(1U)))
-
-#define CSIO_HW_LP_INT_THRESH(hw, val)                                 \
-       (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) :               \
-                                       (V_LP_INT_THRESH_T5(val)))
-
-#define CSIO_HW_M_LP_INT_THRESH(hw)                                    \
-       (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5))
-
-#define CSIO_MAC_INT_CAUSE_REG(hw, port)                               \
-       (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
-                               (T5_PORT_REG(port, MAC_PORT_INT_CAUSE)))
-
-#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
-#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
-#define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0)
-
-#define CSIO_FW_FNAME(hw)                                              \
-       (csio_is_t4(hw->chip_id) ? FW_FNAME_T4 : FW_FNAME_T5)
-
-#define CSIO_CF_FNAME(hw)                                              \
-       (csio_is_t4(hw->chip_id) ? FW_CFG_NAME_T4 : FW_CFG_NAME_T5)
+#include "t4fw_api.h"
+#include "t4fw_version.h"
+
+#define FW_VERSION(chip) ( \
+               FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
+               FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
+               FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
+               FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
+#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
+
+struct fw_info {
+       u8 chip;
+       char *fs_name;
+       char *fw_mod_name;
+       struct fw_hdr fw_hdr;
+};
 
 /* Declare ENUMS */
 enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
@@ -96,7 +91,6 @@ enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
 enum {
        MEMWIN_APERTURE = 2048,
        MEMWIN_BASE     = 0x1b800,
-       MEMWIN_CSIOSTOR = 6,            /* PCI-e Memory Window access */
 };
 
 /* Slow path handlers */
@@ -122,7 +116,6 @@ struct csio_hw_chip_ops {
        void (*chip_dfs_create_ext_mem)(struct csio_hw *);
 };
 
-extern struct csio_hw_chip_ops t4_ops;
 extern struct csio_hw_chip_ops t5_ops;
 
 #endif /* #ifndef __CSIO_HW_CHIP_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c
deleted file mode 100644 (file)
index 95d8318..0000000
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * This file is part of the Chelsio FCoE driver for Linux.
- *
- * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "csio_hw.h"
-#include "csio_init.h"
-
-/*
- * Return the specified PCI-E Configuration Space register from our Physical
- * Function.  We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we go for it
- * directly ourselves.
- */
-static uint32_t
-csio_t4_read_pcie_cfg4(struct csio_hw *hw, int reg)
-{
-       u32 val = 0;
-       struct csio_mb *mbp;
-       int rv;
-       struct fw_ldst_cmd *ldst_cmd;
-
-       mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
-       if (!mbp) {
-               CSIO_INC_STATS(hw, n_err_nomem);
-               pci_read_config_dword(hw->pdev, reg, &val);
-               return val;
-       }
-
-       csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
-       rv = csio_mb_issue(hw, mbp);
-
-       /*
-        * If the LDST Command suucceeded, exctract the returned register
-        * value.  Otherwise read it directly ourself.
-        */
-       if (rv == 0) {
-               ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
-               val = ntohl(ldst_cmd->u.pcie.data[0]);
-       } else
-               pci_read_config_dword(hw->pdev, reg, &val);
-
-       mempool_free(mbp, hw->mb_mempool);
-
-       return val;
-}
-
-static int
-csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
-{
-       u32 bar0;
-       u32 mem_win_base;
-
-       /*
-        * Truncation intentional: we only read the bottom 32-bits of the
-        * 64-bit BAR0/BAR1 ...  We use the hardware backdoor mechanism to
-        * read BAR0 instead of using pci_resource_start() because we could be
-        * operating from within a Virtual Machine which is trapping our
-        * accesses to our Configuration Space and we need to set up the PCI-E
-        * Memory Window decoders with the actual addresses which will be
-        * coming across the PCI-E link.
-        */
-       bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
-       bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
-
-       mem_win_base = bar0 + MEMWIN_BASE;
-
-       /*
-        * Set up memory window for accessing adapter memory ranges.  (Read
-        * back MA register to ensure that changes propagate before we attempt
-        * to use the new values.)
-        */
-       csio_wr_reg32(hw, mem_win_base | BIR(0) |
-                         WINDOW(ilog2(MEMWIN_APERTURE) - 10),
-                         PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
-       csio_rd_reg32(hw,
-                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
-       return 0;
-}
-
-/*
- * Interrupt handler for the PCIE module.
- */
-static void
-csio_t4_pcie_intr_handler(struct csio_hw *hw)
-{
-       static struct intr_info sysbus_intr_info[] = {
-               { RNPP, "RXNP array parity error", -1, 1 },
-               { RPCP, "RXPC array parity error", -1, 1 },
-               { RCIP, "RXCIF array parity error", -1, 1 },
-               { RCCP, "Rx completions control array parity error", -1, 1 },
-               { RFTP, "RXFT array parity error", -1, 1 },
-               { 0, NULL, 0, 0 }
-       };
-       static struct intr_info pcie_port_intr_info[] = {
-               { TPCP, "TXPC array parity error", -1, 1 },
-               { TNPP, "TXNP array parity error", -1, 1 },
-               { TFTP, "TXFT array parity error", -1, 1 },
-               { TCAP, "TXCA array parity error", -1, 1 },
-               { TCIP, "TXCIF array parity error", -1, 1 },
-               { RCAP, "RXCA array parity error", -1, 1 },
-               { OTDD, "outbound request TLP discarded", -1, 1 },
-               { RDPE, "Rx data parity error", -1, 1 },
-               { TDUE, "Tx uncorrectable data error", -1, 1 },
-               { 0, NULL, 0, 0 }
-       };
-
-       static struct intr_info pcie_intr_info[] = {
-               { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
-               { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
-               { MSIDATAPERR, "MSI data parity error", -1, 1 },
-               { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
-               { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
-               { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
-               { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
-               { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
-               { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
-               { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
-               { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
-               { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
-               { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
-               { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
-               { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
-               { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
-               { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
-               { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
-               { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
-               { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
-               { FIDPERR, "PCI FID parity error", -1, 1 },
-               { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
-               { MATAGPERR, "PCI MA tag parity error", -1, 1 },
-               { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
-               { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
-               { RXWRPERR, "PCI Rx write parity error", -1, 1 },
-               { RPLPERR, "PCI replay buffer parity error", -1, 1 },
-               { PCIESINT, "PCI core secondary fault", -1, 1 },
-               { PCIEPINT, "PCI core primary fault", -1, 1 },
-               { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
-                 0 },
-               { 0, NULL, 0, 0 }
-       };
-
-       int fat;
-       fat = csio_handle_intr_status(hw,
-                                     PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
-                                     sysbus_intr_info) +
-             csio_handle_intr_status(hw,
-                                     PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
-                                     pcie_port_intr_info) +
-             csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
-       if (fat)
-               csio_hw_fatal_err(hw);
-}
-
-/*
- * csio_t4_flash_cfg_addr - return the address of the flash configuration file
- * @hw: the HW module
- *
- * Return the address within the flash where the Firmware Configuration
- * File is stored.
- */
-static unsigned int
-csio_t4_flash_cfg_addr(struct csio_hw *hw)
-{
-       return FLASH_CFG_OFFSET;
-}
-
-/*
- *      csio_t4_mc_read - read from MC through backdoor accesses
- *      @hw: the hw module
- *      @idx: not used for T4 adapter
- *      @addr: address of first byte requested
- *      @data: 64 bytes of data containing the requested address
- *      @ecc: where to store the corresponding 64-bit ECC word
- *
- *      Read 64 bytes of data from MC starting at a 64-byte-aligned address
- *      that covers the requested address @addr.  If @parity is not %NULL it
- *      is assigned the 64-bit ECC word for the read data.
- */
-static int
-csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
-               uint64_t *ecc)
-{
-       int i;
-
-       if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
-               return -EBUSY;
-       csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
-       csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
-       csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
-       csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
-                     MC_BIST_CMD);
-       i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
-                                    0, 10, 1, NULL);
-       if (i)
-               return i;
-
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
-
-       for (i = 15; i >= 0; i--)
-               *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
-       if (ecc)
-               *ecc = csio_rd_reg64(hw, MC_DATA(16));
-#undef MC_DATA
-       return 0;
-}
-
-/*
- *      csio_t4_edc_read - read from EDC through backdoor accesses
- *      @hw: the hw module
- *      @idx: which EDC to access
- *      @addr: address of first byte requested
- *      @data: 64 bytes of data containing the requested address
- *      @ecc: where to store the corresponding 64-bit ECC word
- *
- *      Read 64 bytes of data from EDC starting at a 64-byte-aligned address
- *      that covers the requested address @addr.  If @parity is not %NULL it
- *      is assigned the 64-bit ECC word for the read data.
- */
-static int
-csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
-               uint64_t *ecc)
-{
-       int i;
-
-       idx *= EDC_STRIDE;
-       if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
-               return -EBUSY;
-       csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
-       csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
-       csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
-       csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
-                     EDC_BIST_CMD + idx);
-       i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
-                                    0, 10, 1, NULL);
-       if (i)
-               return i;
-
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
-
-       for (i = 15; i >= 0; i--)
-               *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
-       if (ecc)
-               *ecc = csio_rd_reg64(hw, EDC_DATA(16));
-#undef EDC_DATA
-       return 0;
-}
-
-/*
- * csio_t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
- * @hw: the csio_hw
- * @win: PCI-E memory Window to use
- * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
- * @addr: address within indicated memory type
- * @len: amount of memory to transfer
- * @buf: host memory buffer
- * @dir: direction of transfer 1 => read, 0 => write
- *
- * Reads/writes an [almost] arbitrary memory region in the firmware: the
- * firmware memory address, length and host buffer must be aligned on
- * 32-bit boudaries.  The memory is transferred as a raw byte sequence
- * from/to the firmware's memory.  If this memory contains data
- * structures which contain multi-byte integers, it's the callers
- * responsibility to perform appropriate byte order conversions.
- */
-static int
-csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
-               u32 len, uint32_t *buf, int dir)
-{
-       u32 pos, start, offset, memoffset, bar0;
-       u32 edc_size, mc_size, mem_reg, mem_aperture, mem_base;
-
-       /*
-        * Argument sanity checks ...
-        */
-       if ((addr & 0x3) || (len & 0x3))
-               return -EINVAL;
-
-       /* Offset into the region of memory which is being accessed
-        * MEM_EDC0 = 0
-        * MEM_EDC1 = 1
-        * MEM_MC   = 2 -- T4
-        */
-       edc_size  = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A));
-       if (mtype != MEM_MC1)
-               memoffset = (mtype * (edc_size * 1024 * 1024));
-       else {
-               mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw,
-                                                      MA_EXT_MEMORY_BAR_A));
-               memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
-       }
-
-       /* Determine the PCIE_MEM_ACCESS_OFFSET */
-       addr = addr + memoffset;
-
-       /*
-        * Each PCI-E Memory Window is programmed with a window size -- or
-        * "aperture" -- which controls the granularity of its mapping onto
-        * adapter memory.  We need to grab that aperture in order to know
-        * how to use the specified window.  The window is also programmed
-        * with the base address of the Memory Window in BAR0's address
-        * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
-        * the address is relative to BAR0.
-        */
-       mem_reg = csio_rd_reg32(hw,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
-       mem_aperture = 1 << (WINDOW(mem_reg) + 10);
-       mem_base = GET_PCIEOFST(mem_reg) << 10;
-
-       bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
-       bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
-       mem_base -= bar0;
-
-       start = addr & ~(mem_aperture-1);
-       offset = addr - start;
-
-       csio_dbg(hw, "csio_t4_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
-                mem_reg, mem_aperture);
-       csio_dbg(hw, "csio_t4_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
-                mem_base, memoffset);
-       csio_dbg(hw, "csio_t4_memory_rw: bar0: 0x%x, start:0x%x, offset:0x%x\n",
-                bar0, start, offset);
-       csio_dbg(hw, "csio_t4_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
-                mtype, addr, len);
-
-       for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
-               /*
-                * Move PCI-E Memory Window to our current transfer
-                * position.  Read it back to ensure that changes propagate
-                * before we attempt to use the new value.
-                */
-               csio_wr_reg32(hw, pos,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
-               csio_rd_reg32(hw,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
-
-               while (offset < mem_aperture && len > 0) {
-                       if (dir)
-                               *buf++ = csio_rd_reg32(hw, mem_base + offset);
-                       else
-                               csio_wr_reg32(hw, *buf++, mem_base + offset);
-
-                       offset += sizeof(__be32);
-                       len -= sizeof(__be32);
-               }
-       }
-       return 0;
-}
-
-/*
- * csio_t4_dfs_create_ext_mem - setup debugfs for MC to read the values
- * @hw: the csio_hw
- *
- * This function creates files in the debugfs with external memory region MC.
- */
-static void
-csio_t4_dfs_create_ext_mem(struct csio_hw *hw)
-{
-       u32 size;
-       int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A);
-
-       if (i & EXT_MEM_ENABLE_F) {
-               size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A);
-               csio_add_debugfs_mem(hw, "mc", MEM_MC,
-                                    EXT_MEM_SIZE_G(size));
-       }
-}
-
-/* T4 adapter specific function */
-struct csio_hw_chip_ops t4_ops = {
-       .chip_set_mem_win               = csio_t4_set_mem_win,
-       .chip_pcie_intr_handler         = csio_t4_pcie_intr_handler,
-       .chip_flash_cfg_addr            = csio_t4_flash_cfg_addr,
-       .chip_mc_read                   = csio_t4_mc_read,
-       .chip_edc_read                  = csio_t4_edc_read,
-       .chip_memory_rw                 = csio_t4_memory_rw,
-       .chip_dfs_create_ext_mem        = csio_t4_dfs_create_ext_mem,
-};
index 66e180a58718b1c7e7b8c76f806aa0f38582c498..3267f4f627c976f1450670a21b21b3d46cc8e6f3 100644 (file)
@@ -56,11 +56,11 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
         * back MA register to ensure that changes propagate before we attempt
         * to use the new values.)
         */
-       csio_wr_reg32(hw, mem_win_base | BIR(0) |
-                         WINDOW(ilog2(MEMWIN_APERTURE) - 10),
-                         PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+       csio_wr_reg32(hw, mem_win_base | BIR_V(0) |
+                         WINDOW_V(ilog2(MEMWIN_APERTURE) - 10),
+                         PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
        csio_rd_reg32(hw,
-                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
 
        return 0;
 }
@@ -72,74 +72,74 @@ static void
 csio_t5_pcie_intr_handler(struct csio_hw *hw)
 {
        static struct intr_info sysbus_intr_info[] = {
-               { RNPP, "RXNP array parity error", -1, 1 },
-               { RPCP, "RXPC array parity error", -1, 1 },
-               { RCIP, "RXCIF array parity error", -1, 1 },
-               { RCCP, "Rx completions control array parity error", -1, 1 },
-               { RFTP, "RXFT array parity error", -1, 1 },
+               { RNPP_F, "RXNP array parity error", -1, 1 },
+               { RPCP_F, "RXPC array parity error", -1, 1 },
+               { RCIP_F, "RXCIF array parity error", -1, 1 },
+               { RCCP_F, "Rx completions control array parity error", -1, 1 },
+               { RFTP_F, "RXFT array parity error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
        static struct intr_info pcie_port_intr_info[] = {
-               { TPCP, "TXPC array parity error", -1, 1 },
-               { TNPP, "TXNP array parity error", -1, 1 },
-               { TFTP, "TXFT array parity error", -1, 1 },
-               { TCAP, "TXCA array parity error", -1, 1 },
-               { TCIP, "TXCIF array parity error", -1, 1 },
-               { RCAP, "RXCA array parity error", -1, 1 },
-               { OTDD, "outbound request TLP discarded", -1, 1 },
-               { RDPE, "Rx data parity error", -1, 1 },
-               { TDUE, "Tx uncorrectable data error", -1, 1 },
+               { TPCP_F, "TXPC array parity error", -1, 1 },
+               { TNPP_F, "TXNP array parity error", -1, 1 },
+               { TFTP_F, "TXFT array parity error", -1, 1 },
+               { TCAP_F, "TXCA array parity error", -1, 1 },
+               { TCIP_F, "TXCIF array parity error", -1, 1 },
+               { RCAP_F, "RXCA array parity error", -1, 1 },
+               { OTDD_F, "outbound request TLP discarded", -1, 1 },
+               { RDPE_F, "Rx data parity error", -1, 1 },
+               { TDUE_F, "Tx uncorrectable data error", -1, 1 },
                { 0, NULL, 0, 0 }
        };
 
        static struct intr_info pcie_intr_info[] = {
-               { MSTGRPPERR, "Master Response Read Queue parity error",
+               { MSTGRPPERR_F, "Master Response Read Queue parity error",
                -1, 1 },
-               { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
-               { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
-               { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
-               { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
-               { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
-               { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
-               { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+               { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
+               { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
+               { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
+               { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
+               { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
+               { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
+               { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
                -1, 1 },
-               { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+               { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
                -1, 1 },
-               { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
-               { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
-               { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
-               { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
-               { DREQWRPERR, "PCI DMA channel write request parity error",
+               { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
+               { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
+               { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
+               { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
+               { DREQWRPERR_F, "PCI DMA channel write request parity error",
                -1, 1 },
-               { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
-               { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
-               { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
-               { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
-               { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
-               { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
-               { FIDPERR, "PCI FID parity error", -1, 1 },
-               { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
-               { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
-               { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
-               { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+               { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
+               { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
+               { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
+               { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
+               { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
+               { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
+               { FIDPERR_F, "PCI FID parity error", -1, 1 },
+               { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
+               { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
+               { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
+               { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
                -1, 1 },
-               { IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
+               { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
                -1, 1 },
-               { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
-               { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
-               { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
-               { READRSPERR, "Outbound read error", -1, 0 },
+               { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
+               { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
+               { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+               { READRSPERR_F, "Outbound read error", -1, 0 },
                { 0, NULL, 0, 0 }
        };
 
        int fat;
        fat = csio_handle_intr_status(hw,
-                                     PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+                                     PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
                                      sysbus_intr_info) +
              csio_handle_intr_status(hw,
-                                     PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+                                     PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
                                      pcie_port_intr_info) +
-             csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+             csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
        if (fat)
                csio_hw_fatal_err(hw);
 }
@@ -177,25 +177,25 @@ csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
        uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
        uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
 
-       mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
-       mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
-       mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
-       mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
-       mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+       mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx);
+       mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
+       mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
+       mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
+       mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
 
-       if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
+       if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F)
                return -EBUSY;
        csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
        csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
        csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
-       csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST |  BIST_CMD_GAP(1),
+       csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F |  BIST_CMD_GAP_V(1),
                      mc_bist_cmd_reg);
-       i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
+       i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST_F,
                                     0, 10, 1, NULL);
        if (i)
                return i;
 
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i)
 
        for (i = 15; i >= 0; i--)
                *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
@@ -231,27 +231,27 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
 #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
 #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
 
-       edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
-       edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
-       edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
-       edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
-       edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+       edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+       edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+       edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
+       edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
+       edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
 #undef EDC_REG_T5
 #undef EDC_STRIDE_T5
 
-       if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
+       if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST_F)
                return -EBUSY;
        csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
        csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
        csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
-       csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST |  BIST_CMD_GAP(1),
+       csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F |  BIST_CMD_GAP_V(1),
                      edc_bist_cmd_reg);
-       i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
+       i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST_F,
                                     0, 10, 1, NULL);
        if (i)
                return i;
 
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)
 
        for (i = 15; i >= 0; i--)
                *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
@@ -320,13 +320,13 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
         * the address is relative to BAR0.
         */
        mem_reg = csio_rd_reg32(hw,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
-       mem_aperture = 1 << (WINDOW(mem_reg) + 10);
-       mem_base = GET_PCIEOFST(mem_reg) << 10;
+                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win));
+       mem_aperture = 1 << (WINDOW_V(mem_reg) + 10);
+       mem_base = PCIEOFST_G(mem_reg) << 10;
 
        start = addr & ~(mem_aperture-1);
        offset = addr - start;
-       win_pf = V_PFNUM(hw->pfn);
+       win_pf = PFNUM_V(hw->pfn);
 
        csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
                 mem_reg, mem_aperture);
@@ -344,9 +344,9 @@ csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
                 * before we attempt to use the new value.
                 */
                csio_wr_reg32(hw, pos | win_pf,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
                csio_rd_reg32(hw,
-                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+                       PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
 
                while (offset < mem_aperture && len > 0) {
                        if (dir)
index 34d20cc3e110fcc1ec99aad913b74e87651734e0..9b9794d42ffe4848180dbf3a593376959483c2b3 100644 (file)
@@ -1176,9 +1176,8 @@ static struct pci_error_handlers csio_err_handler = {
  */
 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static struct pci_device_id csio_pci_tbl[] = {
-/* Define for iSCSI uses PF5, FCoE uses PF6 */
-#define CH_PCI_DEVICE_ID_FUNCTION      0x5
-#define CH_PCI_DEVICE_ID_FUNCTION2     0x6
+/* Define for FCoE uses PF6 */
+#define CH_PCI_DEVICE_ID_FUNCTION      0x6
 
 #define CH_PCI_ID_TABLE_ENTRY(devid) \
                { PCI_VDEVICE(CHELSIO, (devid)), 0 }
@@ -1256,5 +1255,4 @@ MODULE_DESCRIPTION(CSIO_DRV_DESC);
 MODULE_LICENSE(CSIO_DRV_LICENSE);
 MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
 MODULE_VERSION(CSIO_DRV_VERSION);
-MODULE_FIRMWARE(FW_FNAME_T4);
 MODULE_FIRMWARE(FW_FNAME_T5);
index a8c748a35f9cc6fedb4563c65cb453c584b97780..2fb71c6c3b3723bcdf0485341502931b7843da61 100644 (file)
@@ -317,7 +317,7 @@ csio_fcoe_isr(int irq, void *dev_id)
 
        /* Disable the interrupt for this PCI function. */
        if (hw->intr_mode == CSIO_IM_INTX)
-               csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
+               csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));
 
        /*
         * The read in the following function will flush the
index 87f9280d9b431370bf9391debc3b1438183ea5d8..c00b2ff72b551e48489051bf74015ba2e95c9d85 100644 (file)
@@ -1758,7 +1758,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
        else {
                /* Program DSGL to dma payload */
                dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-                                       ULPTX_MORE | ULPTX_NSGE(1));
+                                       ULPTX_MORE_F | ULPTX_NSGE_V(1));
                dsgl.len0 = cpu_to_be32(pld_len);
                dsgl.addr0 = cpu_to_be64(pld->paddr);
                csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
index 08c265c0f353278b63a05ac6478423b67b47445e..9451787ca7f299715a97e34692315d69e7968910 100644 (file)
@@ -327,7 +327,8 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
 }
 
 #define CSIO_ADVERT_MASK     (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
-                             FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+                             FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G |\
+                             FW_PORT_CAP_ANEG)
 
 /*
  * csio_mb_port- FW PORT command helper
@@ -1104,8 +1105,8 @@ csio_mb_process_portparams_rsp(struct csio_hw *hw,
 void
 csio_mb_intr_enable(struct csio_hw *hw)
 {
-       csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
-       csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+       csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+       csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
 }
 
 /*
@@ -1117,8 +1118,9 @@ csio_mb_intr_enable(struct csio_hw *hw)
 void
 csio_mb_intr_disable(struct csio_hw *hw)
 {
-       csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
-       csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+       csio_wr_reg32(hw, MBMSGRDYINTEN_V(0),
+                     MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
+       csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
 }
 
 static void
@@ -1153,8 +1155,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
 {
        int i;
        __be64 cmd[CSIO_MB_MAX_REGS];
-       uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
-       uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+       uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+       uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
        int size = sizeof(struct fw_debug_cmd);
 
        /* Copy mailbox data */
@@ -1164,8 +1166,8 @@ csio_mb_debug_cmd_handler(struct csio_hw *hw)
        csio_mb_dump_fw_dbg(hw, cmd);
 
        /* Notify FW of mailbox by setting owner as UP */
-       csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
-                     ctl_reg);
+       csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+                     MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
 
        csio_rd_reg32(hw, ctl_reg);
        wmb();
@@ -1187,8 +1189,8 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
        __be64 *cmd = mbp->mb;
        __be64 hdr;
        struct csio_mbm *mbm = &hw->mbm;
-       uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
-       uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+       uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+       uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
        int size = mbp->mb_size;
        int rv = -EINVAL;
        struct fw_cmd_hdr *fw_hdr;
@@ -1224,12 +1226,12 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
        }
 
        /* Now get ownership of mailbox */
-       owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+       owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
 
        if (!csio_mb_is_host_owner(owner)) {
 
                for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
-                       owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+                       owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
                /*
                 * Mailbox unavailable. In immediate mode, fail the command.
                 * In other modes, enqueue the request.
@@ -1271,10 +1273,10 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
        if (mbp->mb_cbfn != NULL) {
                mbm->mcurrent = mbp;
                mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
-               csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
-                             MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
+               csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
+                             MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
        } else
-               csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
+               csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW),
                              ctl_reg);
 
        /* Flush posted writes */
@@ -1294,9 +1296,9 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
 
                /* Check for response */
                ctl = csio_rd_reg32(hw, ctl_reg);
-               if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+               if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
 
-                       if (!(ctl & MBMSGVALID)) {
+                       if (!(ctl & MBMSGVALID_F)) {
                                csio_wr_reg32(hw, 0, ctl_reg);
                                continue;
                        }
@@ -1457,16 +1459,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
        __be64                  *cmd;
        uint32_t                ctl, cim_cause, pl_cause;
        int                     i;
-       uint32_t                ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
-       uint32_t                data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+       uint32_t        ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
+       uint32_t        data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
        int                     size;
        __be64                  hdr;
        struct fw_cmd_hdr       *fw_hdr;
 
-       pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
-       cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+       pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A));
+       cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
 
-       if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
+       if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) {
                CSIO_INC_STATS(hw, n_mbint_unexp);
                return -EINVAL;
        }
@@ -1477,16 +1479,16 @@ csio_mb_isr_handler(struct csio_hw *hw)
         * the upper level cause register. In other words, CIM-cause
         * first followed by PL-Cause next.
         */
-       csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
-       csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
+       csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
+       csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A));
 
        ctl = csio_rd_reg32(hw, ctl_reg);
 
-       if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+       if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {
 
                CSIO_DUMP_MB(hw, hw->pfn, data_reg);
 
-               if (!(ctl & MBMSGVALID)) {
+               if (!(ctl & MBMSGVALID_F)) {
                        csio_warn(hw,
                                  "Stray mailbox interrupt recvd,"
                                  " mailbox data not valid\n");
index 3987284e0d2abcdc98e5158c41245f7d5a76045c..2c4562d82dc06281dc9e9e51cf1e8a9701c041c1 100644 (file)
@@ -298,8 +298,8 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
        struct csio_dma_buf *dma_buf;
        struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
 
-       sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
-                                    ULPTX_NSGE(req->nsge));
+       sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
+                                    ULPTX_NSGE_V(req->nsge));
        /* Now add the data SGLs */
        if (likely(!req->dcopy)) {
                scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
index 773da14cfa145c703550e59a1f33c420544fedd4..e8f18174f2e97fdcfb975a53556a9c9069d9c7fa 100644 (file)
@@ -51,12 +51,12 @@ int csio_intr_coalesce_time = 10;   /* value:SGE_TIMER_VALUE_1 */
 static int csio_sge_timer_reg = 1;
 
 #define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val)                           \
-       csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
+       csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
 
 static void
 csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
 {
-       sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
+       sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
                                                        reg * sizeof(uint32_t));
 }
 
@@ -71,7 +71,7 @@ csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
 static inline uint32_t
 csio_wr_qstat_pgsz(struct csio_hw *hw)
 {
-       return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ?  128 : 64;
+       return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ?  128 : 64;
 }
 
 /* Ring freelist doorbell */
@@ -84,9 +84,9 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
         * 8 freelist buffer pointers (since each pointer is 8 bytes).
         */
        if (flq->inc_idx >= 8) {
-               csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
-                                 CSIO_HW_PIDX(hw, flq->inc_idx / 8),
-                                 MYPF_REG(SGE_PF_KDOORBELL));
+               csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
+                                 PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F,
+                                 MYPF_REG(SGE_PF_KDOORBELL_A));
                flq->inc_idx &= 7;
        }
 }
@@ -95,10 +95,10 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
 static void
 csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
 {
-       csio_wr_reg32(hw, CIDXINC(0)            |
-                         INGRESSQID(iqid)      |
-                         TIMERREG(X_TIMERREG_RESTART_COUNTER),
-                         MYPF_REG(SGE_PF_GTS));
+       csio_wr_reg32(hw, CIDXINC_V(0)          |
+                         INGRESSQID_V(iqid)    |
+                         TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
+                         MYPF_REG(SGE_PF_GTS_A));
 }
 
 /*
@@ -982,9 +982,9 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
 
        wmb();
        /* Ring SGE Doorbell writing q->pidx into it */
-       csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
-                         CSIO_HW_PIDX(hw, q->inc_idx),
-                         MYPF_REG(SGE_PF_KDOORBELL));
+       csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
+                         PIDX_T5_V(q->inc_idx) | DBTYPE_F,
+                         MYPF_REG(SGE_PF_KDOORBELL_A));
        q->inc_idx = 0;
 
        return 0;
@@ -1242,10 +1242,10 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
 
 restart:
        /* Now inform SGE about our incremental index value */
-       csio_wr_reg32(hw, CIDXINC(q->inc_idx)           |
-                         INGRESSQID(q->un.iq.physiqid) |
-                         TIMERREG(csio_sge_timer_reg),
-                         MYPF_REG(SGE_PF_GTS));
+       csio_wr_reg32(hw, CIDXINC_V(q->inc_idx)         |
+                         INGRESSQID_V(q->un.iq.physiqid)       |
+                         TIMERREG_V(csio_sge_timer_reg),
+                         MYPF_REG(SGE_PF_GTS_A));
        q->stats.n_tot_rsps += q->inc_idx;
 
        q->inc_idx = 0;
@@ -1310,22 +1310,23 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
        uint32_t ingpad = 0;
        uint32_t stat_len = clsz > 64 ? 128 : 64;
 
-       csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
-                     HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
-                     HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
-                     HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
-                     SGE_HOST_PAGE_SIZE);
+       csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
+                     HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
+                     HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
+                     HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
+                     SGE_HOST_PAGE_SIZE_A);
 
        sge->csio_fl_align = clsz < 32 ? 32 : clsz;
        ingpad = ilog2(sge->csio_fl_align) - 5;
 
-       csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
-                                           EGRSTATUSPAGESIZE(1),
-                          INGPADBOUNDARY(ingpad) |
-                          EGRSTATUSPAGESIZE(stat_len != 64));
+       csio_set_reg_field(hw, SGE_CONTROL_A,
+                          INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+                          EGRSTATUSPAGESIZE_F,
+                          INGPADBOUNDARY_V(ingpad) |
+                          EGRSTATUSPAGESIZE_V(stat_len != 64));
 
        /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
-       csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
+       csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
 
        /*
         * If using hard params, the following will get set correctly
@@ -1333,23 +1334,24 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
         */
        if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
                csio_wr_reg32(hw,
-                       (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+                       (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
                        sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
-                       SGE_FL_BUFFER_SIZE2);
+                       SGE_FL_BUFFER_SIZE2_A);
                csio_wr_reg32(hw,
-                       (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+                       (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
                        sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
-                       SGE_FL_BUFFER_SIZE3);
+                       SGE_FL_BUFFER_SIZE3_A);
        }
 
-       csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
+       csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);
 
        /* default value of rx_dma_offset of the NIC driver */
-       csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
-                          PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+       csio_set_reg_field(hw, SGE_CONTROL_A,
+                          PKTSHIFT_V(PKTSHIFT_M),
+                          PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));
 
-       csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
-                                   CSUM_HAS_PSEUDO_HDR, 0);
+       csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
+                                   CSUM_HAS_PSEUDO_HDR_F, 0);
 }
 
 static void
@@ -1384,9 +1386,9 @@ csio_wr_get_sge(struct csio_hw *hw)
        u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
        u32 ingress_rx_threshold;
 
-       sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+       sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
 
-       ingpad = INGPADBOUNDARY_GET(sge->sge_control);
+       ingpad = INGPADBOUNDARY_G(sge->sge_control);
 
        switch (ingpad) {
        case X_INGPCIEBOUNDARY_32B:
@@ -1410,28 +1412,28 @@ csio_wr_get_sge(struct csio_hw *hw)
        for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
                csio_get_flbuf_size(hw, sge, i);
 
-       timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
-       timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
-       timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
+       timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
+       timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
+       timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);
 
        sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE0_GET(timer_value_0_and_1));
+                                       TIMERVALUE0_G(timer_value_0_and_1));
        sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE1_GET(timer_value_0_and_1));
+                                       TIMERVALUE1_G(timer_value_0_and_1));
        sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE2_GET(timer_value_2_and_3));
+                                       TIMERVALUE2_G(timer_value_2_and_3));
        sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE3_GET(timer_value_2_and_3));
+                                       TIMERVALUE3_G(timer_value_2_and_3));
        sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE4_GET(timer_value_4_and_5));
+                                       TIMERVALUE4_G(timer_value_4_and_5));
        sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
-                                       TIMERVALUE5_GET(timer_value_4_and_5));
+                                       TIMERVALUE5_G(timer_value_4_and_5));
 
-       ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
-       sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
-       sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
-       sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
-       sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+       ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
+       sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
+       sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
+       sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
+       sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
 
        csio_init_intr_coalesce_parms(hw);
 }
@@ -1454,9 +1456,9 @@ csio_wr_set_sge(struct csio_hw *hw)
         * Set up our basic SGE mode to deliver CPL messages to our Ingress
         * Queue and Packet Date to the Free List.
         */
-       csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
+       csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);
 
-       sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+       sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
 
        /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
 
@@ -1464,22 +1466,23 @@ csio_wr_set_sge(struct csio_hw *hw)
         * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
         * and generate an interrupt when this occurs so we can recover.
         */
-       csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
-                  HP_INT_THRESH(HP_INT_THRESH_MASK) |
-                  CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
-                  HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
-                  CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));
+       csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
+                          LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
+                          LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
+       csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A,
+                          HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
+                          HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
 
-       csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
-                          ENABLE_DROP);
+       csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
+                          ENABLE_DROP_F);
 
        /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
 
        CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
        csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
-                     & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
+                     & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
        csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
-                     & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
+                     & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
        CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
        CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
        CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
@@ -1502,26 +1505,26 @@ csio_wr_set_sge(struct csio_hw *hw)
        sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
        sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
 
-       csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
-                     THRESHOLD_1(sge->counter_val[1]) |
-                     THRESHOLD_2(sge->counter_val[2]) |
-                     THRESHOLD_3(sge->counter_val[3]),
-                     SGE_INGRESS_RX_THRESHOLD);
+       csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
+                     THRESHOLD_1_V(sge->counter_val[1]) |
+                     THRESHOLD_2_V(sge->counter_val[2]) |
+                     THRESHOLD_3_V(sge->counter_val[3]),
+                     SGE_INGRESS_RX_THRESHOLD_A);
 
        csio_wr_reg32(hw,
-                  TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
-                  TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
-                  SGE_TIMER_VALUE_0_AND_1);
+                  TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+                  TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+                  SGE_TIMER_VALUE_0_AND_1_A);
 
        csio_wr_reg32(hw,
-                  TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
-                  TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
-                  SGE_TIMER_VALUE_2_AND_3);
+                  TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+                  TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+                  SGE_TIMER_VALUE_2_AND_3_A);
 
        csio_wr_reg32(hw,
-                  TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
-                  TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
-                  SGE_TIMER_VALUE_4_AND_5);
+                  TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+                  TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+                  SGE_TIMER_VALUE_4_AND_5_A);
 
        csio_init_intr_coalesce_parms(hw);
 }
index a83d2ceded83e033e322343009cc3a054dfb2fdf..dd00e5fe4a5e75b2d5a8599af63d8d18aa3fff02 100644 (file)
@@ -28,6 +28,7 @@
 #include "t4fw_api.h"
 #include "l2t.h"
 #include "cxgb4i.h"
+#include "clip_tbl.h"
 
 static unsigned int dbg_level;
 
@@ -704,7 +705,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
        struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
        unsigned short tcp_opt = ntohs(req->tcp_opt);
        unsigned int tid = GET_TID(req);
-       unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+       unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);
@@ -752,15 +753,15 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
        if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
                csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
 
-       csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
-       if (GET_TCPOPT_TSTAMP(tcp_opt))
+       csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
+       if (TCPOPT_TSTAMP_G(tcp_opt))
                csk->advmss -= 12;
        if (csk->advmss < 128)
                csk->advmss = 128;
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, mss_idx %u, advmss %u.\n",
-                       csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);
+                       csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
 
        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
 
@@ -856,8 +857,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
        struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        unsigned int atid =
-               GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
-       unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+               TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
+       unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
 
@@ -1112,7 +1113,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
                hlen = ntohs(cpl->len);
                dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
 
-               plen = ISCSI_PDU_LEN(pdu_len_ddp);
+               plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
                if (is_t4(lldi->adapter_type))
                        plen -= 40;
 
@@ -1322,6 +1323,9 @@ static inline void l2t_put(struct cxgbi_sock *csk)
 static void release_offload_resources(struct cxgbi_sock *csk)
 {
        struct cxgb4_lld_info *lldi;
+#if IS_ENABLED(CONFIG_IPV6)
+       struct net_device *ndev = csk->cdev->ports[csk->port_id];
+#endif
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
@@ -1334,6 +1338,12 @@ static void release_offload_resources(struct cxgbi_sock *csk)
        }
 
        l2t_put(csk);
+#if IS_ENABLED(CONFIG_IPV6)
+       if (csk->csk_family == AF_INET6)
+               cxgb4_clip_release(ndev,
+                                  (const u32 *)&csk->saddr6.sin6_addr, 1);
+#endif
+
        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
                free_atid(csk);
        else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
@@ -1391,10 +1401,15 @@ static int init_act_open(struct cxgbi_sock *csk)
        csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
        if (!csk->l2t) {
                pr_err("%s, cannot alloc l2t.\n", ndev->name);
-               goto rel_resource;
+               goto rel_resource_without_clip;
        }
        cxgbi_sock_get(csk);
 
+#if IS_ENABLED(CONFIG_IPV6)
+       if (csk->csk_family == AF_INET6)
+               cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
+#endif
+
        if (t4) {
                size = sizeof(struct cpl_act_open_req);
                size6 = sizeof(struct cpl_act_open_req6);
@@ -1451,6 +1466,12 @@ static int init_act_open(struct cxgbi_sock *csk)
        return 0;
 
 rel_resource:
+#if IS_ENABLED(CONFIG_IPV6)
+       if (csk->csk_family == AF_INET6)
+               cxgb4_clip_release(ndev,
+                                  (const u32 *)&csk->saddr6.sin6_addr, 1);
+#endif
+rel_resource_without_clip:
        if (n)
                neigh_release(n);
        if (skb)
@@ -1619,7 +1640,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
        req = (struct cpl_set_tcb_field *)skb->head;
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
-       req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+       req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 8);
        req->val = cpu_to_be64(pg_idx << 8);
@@ -1651,7 +1672,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
        req = (struct cpl_set_tcb_field *)skb->head;
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-       req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+       req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 4);
        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
index 1dba62c5cf6a0b364df79805f5e8ee857ba36b46..1efebc9eedfb384312de87e0bd4548599524b0dc 100644 (file)
@@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref)
        struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
        struct scsi_device *sdev = scsi_dh_data->sdev;
 
+       scsi_dh->detach(sdev);
+
        spin_lock_irq(sdev->request_queue->queue_lock);
        sdev->scsi_dh_data = NULL;
        spin_unlock_irq(sdev->request_queue->queue_lock);
 
-       scsi_dh->detach(sdev);
        sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
        module_put(scsi_dh->module);
 }
index 3b73b96619e2deeb82e203927f57904913a51dff..26270c351624f229cf85b6069ffe53ee26d50bef 100644 (file)
@@ -39,7 +39,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.6.0.16"
+#define DRV_VERSION            "1.6.0.17"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
index 2097de42a14775c1b164485482e834198a77f474..155b286f1a9d3cc8b366a6a5b7610ae562337f62 100644 (file)
@@ -1892,6 +1892,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
                goto fnic_abort_cmd_end;
        }
 
+       /* IO out of order */
+
+       if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+               spin_unlock_irqrestore(io_lock, flags);
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "Issuing Host reset due to out of order IO\n");
+
+               if (fnic_host_reset(sc) == FAILED) {
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "fnic_host_reset failed.\n");
+               }
+               ret = FAILED;
+               goto fnic_abort_cmd_end;
+       }
+
        CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
 
        /*
index df4e27cd996a3c68cf095e40d7a6ccf7ebf6a065..9219953ee949a9dfaf1ff0f044e41a3e5c13adc7 100644 (file)
@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
+       ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
 }
@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 
        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
+       if (ipr_cmd->eh_comp)
+               complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 }
 
@@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
        return rc;
 }
 
+/**
+ * ipr_match_lun - Match function for specified LUN
+ * @ipr_cmd:   ipr command struct
+ * @device:            device to match (sdev)
+ *
+ * Returns:
+ *     1 if command matches sdev / 0 if command does not match sdev
+ **/
+static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
+{
+       if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
+               return 1;
+       return 0;
+}
+
+/**
+ * ipr_wait_for_ops - Wait for matching commands to complete
+ * @ipr_cmd:   ipr command struct
+ * @device:            device to match (sdev)
+ * @match:             match function to use
+ *
+ * Returns:
+ *     SUCCESS / FAILED
+ **/
+static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
+                           int (*match)(struct ipr_cmnd *, void *))
+{
+       struct ipr_cmnd *ipr_cmd;
+       int wait;
+       unsigned long flags;
+       struct ipr_hrr_queue *hrrq;
+       signed long timeout = IPR_ABORT_TASK_TIMEOUT;
+       DECLARE_COMPLETION_ONSTACK(comp);
+
+       ENTER;
+       do {
+               wait = 0;
+
+               for_each_hrrq(hrrq, ioa_cfg) {
+                       spin_lock_irqsave(hrrq->lock, flags);
+                       list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                               if (match(ipr_cmd, device)) {
+                                       ipr_cmd->eh_comp = &comp;
+                                       wait++;
+                               }
+                       }
+                       spin_unlock_irqrestore(hrrq->lock, flags);
+               }
+
+               if (wait) {
+                       timeout = wait_for_completion_timeout(&comp, timeout);
+
+                       if (!timeout) {
+                               wait = 0;
+
+                               for_each_hrrq(hrrq, ioa_cfg) {
+                                       spin_lock_irqsave(hrrq->lock, flags);
+                                       list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                                               if (match(ipr_cmd, device)) {
+                                                       ipr_cmd->eh_comp = NULL;
+                                                       wait++;
+                                               }
+                                       }
+                                       spin_unlock_irqrestore(hrrq->lock, flags);
+                               }
+
+                               if (wait)
+                                       dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
+                               LEAVE;
+                               return wait ? FAILED : SUCCESS;
+                       }
+               }
+       } while (wait);
+
+       LEAVE;
+       return SUCCESS;
+}
+
 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg;
@@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
 {
        int rc;
+       struct ipr_ioa_cfg *ioa_cfg;
+
+       ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
 
        spin_lock_irq(cmd->device->host->host_lock);
        rc = __ipr_eh_dev_reset(cmd);
        spin_unlock_irq(cmd->device->host->host_lock);
 
+       if (rc == SUCCESS)
+               rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+
        return rc;
 }
 
@@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
 {
        unsigned long flags;
        int rc;
+       struct ipr_ioa_cfg *ioa_cfg;
 
        ENTER;
 
+       ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
        spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
        rc = ipr_cancel_op(scsi_cmd);
        spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
 
+       if (rc == SUCCESS)
+               rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
        LEAVE;
        return rc;
 }
index b4f3eec51bc9b19783f931116f63aa70bc2d235a..ec03b42fa2b9fe0f388bc266fd75e8190460f682 100644 (file)
@@ -1606,6 +1606,7 @@ struct ipr_cmnd {
                struct scsi_device *sdev;
        } u;
 
+       struct completion *eh_comp;
        struct ipr_hrr_queue *hrrq;
        struct ipr_ioa_cfg *ioa_cfg;
 };
index 8c27b6a77ec4b1ae9c914cfe15eb05d7dd0b371a..cf222f46eac5da0a3668516e10b346ba8344adb7 100644 (file)
@@ -1473,13 +1473,7 @@ static int pmcraid_notify_aen(
        }
 
        /* send genetlink multicast message to notify appplications */
-       result = genlmsg_end(skb, msg_header);
-
-       if (result < 0) {
-               pmcraid_err("genlmsg_end failed\n");
-               nlmsg_free(skb);
-               return result;
-       }
+       genlmsg_end(skb, msg_header);
 
        result = genlmsg_multicast(&pmcraid_event_family, skb,
                                   0, 0, GFP_ATOMIC);
index 12ca291c1380845e45a40b0b4f5f4dd3356ab429..cce1cbc1a9276f4492d6efa16996313c00daabd6 100644 (file)
@@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
         * Return target busy if we've received a non-zero retry_delay_timer
         * in a FCP_RSP.
         */
-       if (time_after(jiffies, fcport->retry_delay_timestamp))
+       if (fcport->retry_delay_timestamp == 0) {
+               /* retry delay not set */
+       } else if (time_after(jiffies, fcport->retry_delay_timestamp))
                fcport->retry_delay_timestamp = 0;
        else
                goto qc24_target_busy;
index e02885451425dbd4af6272ce53d1b2b54f809a43..9b3829931f40d95c2cdd4e673c33a56b510edc9b 100644 (file)
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev)
                return -ENXIO;
        if (!get_device(&sdev->sdev_gendev))
                return -ENXIO;
-       /* We can fail this if we're doing SCSI operations
+       /* We can fail try_module_get if we're doing SCSI operations
         * from module exit (like cache flush) */
-       try_module_get(sdev->host->hostt->module);
+       __module_get(sdev->host->hostt->module);
 
        return 0;
 }
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
-#ifdef CONFIG_MODULE_UNLOAD
-       struct module *module = sdev->host->hostt->module;
-
-       /* The module refcount will be zero if scsi_device_get()
-        * was called from a module removal routine */
-       if (module && module_refcount(module) != 0)
-               module_put(module);
-#endif
+       module_put(sdev->host->hostt->module);
        put_device(&sdev->sdev_gendev);
 }
 EXPORT_SYMBOL(scsi_device_put);
index 7b8b51bc29b4353debc9bd9911fcbf939753c782..4aca1b0378c2458212945a3877b303062ea529ca 100644 (file)
@@ -1623,7 +1623,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        req_opcode = cmd[3];
        req_sa = get_unaligned_be16(cmd + 4);
        alloc_len = get_unaligned_be32(cmd + 6);
-       if (alloc_len < 4 && alloc_len > 0xffff) {
+       if (alloc_len < 4 || alloc_len > 0xffff) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
                return check_condition_result;
        }
@@ -1631,7 +1631,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
                a_len = 8192;
        else
                a_len = alloc_len;
-       arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
+       arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
        if (NULL == arr) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
                                INSUFF_RES_ASCQ);
index e42fff6e8c109d66891bf291c07f7cb14a875b30..8afb01604d515baff0ed4b10e2d526326935a913 100644 (file)
@@ -1041,7 +1041,7 @@ retry:
                }
                /* signal not to enter either branch of the if () below */
                timeleft = 0;
-               rtn = NEEDS_RETRY;
+               rtn = FAILED;
        } else {
                timeleft = wait_for_completion_timeout(&done, timeout);
                rtn = SUCCESS;
@@ -1081,7 +1081,7 @@ retry:
                        rtn = FAILED;
                        break;
                }
-       } else if (!rtn) {
+       } else if (rtn != FAILED) {
                scsi_abort_eh_cmnd(scmd);
                rtn = FAILED;
        }
index 9ea95dd3e2604eea2613a5a15d074c2357fac7dd..17bb541f7cc259a8a3f52c65ec1cae5636e87f65 100644 (file)
@@ -591,7 +591,6 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
 {
        struct scatterlist *first_chunk = NULL;
-       gfp_t gfp_mask = mq ? GFP_NOIO : GFP_ATOMIC;
        int ret;
 
        BUG_ON(!nents);
@@ -606,7 +605,7 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
        }
 
        ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
-                              first_chunk, gfp_mask, scsi_sg_alloc);
+                              first_chunk, GFP_ATOMIC, scsi_sg_alloc);
        if (unlikely(ret))
                scsi_free_sgtable(sdb, mq);
        return ret;
@@ -1144,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd)
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                int ivecs, count;
 
-               BUG_ON(prot_sdb == NULL);
+               if (prot_sdb == NULL) {
+                       /*
+                        * This can happen if someone (e.g. multipath)
+                        * queues a command to a device on an adapter
+                        * that does not support DIX.
+                        */
+                       WARN_ON_ONCE(1);
+                       error = BLKPREP_KILL;
+                       goto err_exit;
+               }
+
                ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
                if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
index fedab3c21ddf18adcb291a99c49b7b4ac051aa85..05ea0d49a3a3ddf2f039670db88c4d9f126662f4 100644 (file)
@@ -2623,8 +2623,9 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
                                sd_config_discard(sdkp, SD_LBP_WS16);
 
                } else {        /* LBP VPD page tells us what to use */
-
-                       if (sdkp->lbpws)
+                       if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
+                               sd_config_discard(sdkp, SD_LBP_UNMAP);
+                       else if (sdkp->lbpws)
                                sd_config_discard(sdkp, SD_LBP_WS16);
                        else if (sdkp->lbpws10)
                                sd_config_discard(sdkp, SD_LBP_WS10);
@@ -2799,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
         */
        sd_set_flush_flag(sdkp);
 
-       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-                               sdkp->max_xfer_blocks);
+       max_xfer = sdkp->max_xfer_blocks;
        max_xfer <<= ilog2(sdp->sector_size) - 9;
+
+       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+                               max_xfer);
        blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
index 7281316a5ecba79203dc7351b280ab25cd5f2d11..a67d37c7e3c00f9518694f267aa75efd5c73398a 100644 (file)
@@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
        iounmap(clk_reg);
 
        dws->num_cs = 16;
-       dws->fifo_len = 40;     /* FIFO has 40 words buffer */
 
 #ifdef CONFIG_SPI_DW_MID_DMA
        dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
index d0d5542efc06db7a74b46a6a7230a4ce65ba53d5..8edcd1b84562109799281fb48867df8ce73ac7b2 100644 (file)
@@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws)
        if (!dws->fifo_len) {
                u32 fifo;
 
-               for (fifo = 2; fifo <= 257; fifo++) {
+               for (fifo = 2; fifo <= 256; fifo++) {
                        dw_writew(dws, DW_SPI_TXFLTR, fifo);
                        if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
                                break;
                }
 
-               dws->fifo_len = (fifo == 257) ? 0 : fifo;
+               dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
                dw_writew(dws, DW_SPI_TXFLTR, 0);
        }
 }
@@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        if (dws->dma_ops && dws->dma_ops->dma_init) {
                ret = dws->dma_ops->dma_init(dws);
                if (ret) {
-                       dev_warn(&master->dev, "DMA init failed\n");
+                       dev_warn(dev, "DMA init failed\n");
                        dws->dma_inited = 0;
                }
        }
index 4cda994d3f40cf86116b85ff21e71f7d8e33ed4d..9b80d54d4ddbea1fdef3f4ae78416c24a8e771e4 100644 (file)
@@ -342,8 +342,7 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
        /* Only alloc on first setup */
        chip = spi_get_ctldata(spi);
        if (chip == NULL) {
-               chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
-                                   GFP_KERNEL);
+               chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;
        }
@@ -382,6 +381,16 @@ static int dspi_setup(struct spi_device *spi)
        return dspi_setup_transfer(spi, NULL);
 }
 
+static void dspi_cleanup(struct spi_device *spi)
+{
+       struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
+
+       dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
+                       spi->master->bus_num, spi->chip_select);
+
+       kfree(chip);
+}
+
 static irqreturn_t dspi_interrupt(int irq, void *dev_id)
 {
        struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
@@ -467,6 +476,7 @@ static int dspi_probe(struct platform_device *pdev)
        dspi->bitbang.master->setup = dspi_setup;
        dspi->bitbang.master->dev.of_node = pdev->dev.of_node;
 
+       master->cleanup = dspi_cleanup;
        master->mode_bits = SPI_CPOL | SPI_CPHA;
        master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
                                        SPI_BPW_MASK(16);
index 961b97d43b430914ed317b9623d06f940e39d797..fe1b7699fab634a31ae9e903146b775c467ddd80 100644 (file)
@@ -823,6 +823,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
        struct dma_slave_config slave_config = {};
        int ret;
 
+       /* use pio mode for i.mx6dl chip TKT238285 */
+       if (of_machine_is_compatible("fsl,imx6dl"))
+               return 0;
+
        /* Prepare for TX DMA: */
        master->dma_tx = dma_request_slave_channel(dev, "tx");
        if (!master->dma_tx) {
index 05c623cfb078d6503bd6d501cc1e821f07784d9e..23822e7df6c1c6e1e2caa18ea19cfcb069c3796d 100644 (file)
@@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data)
                        cs_deassert(drv_data);
        }
 
-       spi_finalize_current_message(drv_data->master);
        drv_data->cur_chip = NULL;
+       spi_finalize_current_message(drv_data->master);
 }
 
 static void reset_sccr1(struct driver_data *drv_data)
index 96a5fc0878d86d4fc217b30d466621176b1bc24f..3ab7a21445fc253406eaf92abb87ce99974ce828 100644 (file)
@@ -82,7 +82,7 @@ struct sh_msiof_spi_priv {
 #define MDR1_SYNCMD_LR  0x30000000 /*   L/R mode */
 #define MDR1_SYNCAC_SHIFT       25 /* Sync Polarity (1 = Active-low) */
 #define MDR1_BITLSB_SHIFT       24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_FLD_MASK   0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_MASK   0x0000000c /* Frame Sync Signal Interval (0-3) */
 #define MDR1_FLD_SHIFT           2
 #define MDR1_XXSTP      0x00000001 /* Transmission/Reception Stop on FIFO */
 /* TMDR1 */
index 930f6010203e96d6aadb3602fea868bf2a1b5bfd..65d610abe06e53fc0ec8cb34ccd367b7492b40f4 100644 (file)
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
                return 0;
        }
 
-       if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+       if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
                return -EFAULT;
        }
index 81784c6f7b8872ed7a0b45aaea1ec0124633db17..77d8753f6ba40df0099dee88b05116f1684e6e93 100644 (file)
@@ -1,6 +1,7 @@
 config VIDEO_TLG2300
        tristate "Telegent TLG2300 USB video capture support (Deprecated)"
        depends on VIDEO_DEV && I2C && SND && DVB_CORE
+       depends on MEDIA_USB_SUPPORT
        select VIDEO_TUNER
        select VIDEO_TVEEPROM
        depends on RC_CORE
index 093535c6217b46000007f370fb047ea8ee24919c..120b70d72d79849e79fb52f2c8580cf7bf0538a1 100644 (file)
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle;
 static const struct mfd_cell nvec_devices[] = {
        {
                .name = "nvec-kbd",
-               .id = 1,
        },
        {
                .name = "nvec-mouse",
-               .id = 1,
        },
        {
                .name = "nvec-power",
-               .id = 1,
+               .id = 0,
        },
        {
                .name = "nvec-power",
-               .id = 2,
+               .id = 1,
        },
        {
                .name = "nvec-paz00",
-               .id = 1,
        },
 };
 
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
                nvec_msg_free(nvec, msg);
        }
 
-       ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
+       ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
                              ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
        if (ret)
                dev_err(nvec->dev, "error adding subdevices\n");
index 86c72ba0a0cd5cc019ef068dcec54886f88e7653..f8c5fc371c4cb4ff0b349e9eb2ff871bc60a0574 100644 (file)
@@ -2177,7 +2177,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
                /* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
                /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
                /* Select VC1/VC2, CR215 = 0x02->0x06 */
-               bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06);
+               bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
                /* }} */
 
                for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
index c8f739dd346eea7e4fc3287eb2eb5338955a1e46..70f870541f9268b598d78c8bf04792877b635231 100644 (file)
@@ -182,6 +182,14 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
        if (pDevice->byCurrentCh == uConnectionChannel)
                return bResult;
 
+       /* Set VGA to max sensitivity */
+       if (pDevice->bUpdateBBVGA &&
+           pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
+               pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
+
+               BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+       }
+
        /* clear NAV */
        MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
 
index 83e4162c0094c4b2bed6853bc8cfb5636011191a..cd1a277d853b5d4cfa6f6e4156761f5d20558e25 100644 (file)
@@ -1232,7 +1232,7 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
        head_td = priv->apCurrTD[dma_idx];
 
-       head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
+       head_td->m_td1TD1.byTCR = 0;
 
        head_td->pTDInfo->skb = skb;
 
@@ -1257,6 +1257,11 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
        priv->bPWBitOn = false;
 
+       /* Set TSR1 & ReqCount in TxDescHead */
+       head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
+       head_td->m_td1TD1.wReqCount =
+                       cpu_to_le16((u16)head_td->pTDInfo->dwReqCount);
+
        head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
 
        if (dma_idx == TYPE_AC0DMA)
@@ -1500,9 +1505,11 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
                if (conf->enable_beacon) {
                        vnt_beacon_enable(priv, vif, conf);
 
-                       MACvRegBitsOn(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+                       MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR,
+                                     TCR_AUTOBCNTX);
                } else {
-                       MACvRegBitsOff(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+                       MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR,
+                                      TCR_AUTOBCNTX);
                }
        }
 
index 61c39dd7ad013c9e40b784c70414fac62bac908b..b5b0155961f22e08959766d01cea8db241893a7e 100644 (file)
@@ -1204,13 +1204,10 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
 
        ptdCurr = (PSTxDesc)pHeadTD;
 
-       ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
+       ptdCurr->pTDInfo->dwReqCount = cbReqCount;
        ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
        ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
        ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
-       /* Set TSR1 & ReqCount in TxDescHead */
-       ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
-       ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
 
        return cbHeaderLength;
 }
index 55f6774f706f729b92fb2045abb7f9a33740c2b4..aebde3289c50de6722062dfdea21fa1c549090cd 100644 (file)
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                goto reject;
        }
        if (!strncmp("=All", text_ptr, 4)) {
-               cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+               cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
        } else if (!strncmp("=iqn.", text_ptr, 5) ||
                   !strncmp("=eui.", text_ptr, 5)) {
-               cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+               cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
        } else {
                pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
                goto reject;
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                return -ENOMEM;
        }
        /*
-        * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+        * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
         * explicit case..
         */
-       if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+       if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
                text_ptr = strchr(text_in, '=');
                if (!text_ptr) {
                        pr_err("Unable to locate '=' string in text_in:"
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 
        spin_lock(&tiqn_lock);
        list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
-               if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+               if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
                     strcmp(tiqn->tiqn, text_ptr)) {
                        continue;
                }
@@ -3512,7 +3512,7 @@ eob:
                if (end_of_buf)
                        break;
 
-               if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+               if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
                        break;
        }
        spin_unlock(&tiqn_lock);
index 09a522bae222d190ec92e157a42f13d2e361da4a..cbcff38ac9b7d30cf5b21882efeb34bb4fb39641 100644 (file)
@@ -135,8 +135,8 @@ enum cmd_flags_table {
        ICF_CONTIG_MEMORY                       = 0x00000020,
        ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
        ICF_OOO_CMDSN                           = 0x00000080,
-       IFC_SENDTARGETS_ALL                     = 0x00000100,
-       IFC_SENDTARGETS_SINGLE                  = 0x00000200,
+       ICF_SENDTARGETS_ALL                     = 0x00000100,
+       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
 };
 
 /* struct iscsi_cmd->i_state */
index 7653cfb027a200cbec0dd51c95047708837c7227..58f49ff69b1424bf5feb33ed64eba495d8826851 100644 (file)
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 }
 EXPORT_SYMBOL(se_dev_set_queue_depth);
 
-int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
-{
-       int block_size = dev->dev_attrib.block_size;
-
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device"
-                       " fabric_max_sectors while export_count is %d\n",
-                       dev, dev->export_count);
-               return -EINVAL;
-       }
-       if (!fabric_max_sectors) {
-               pr_err("dev[%p]: Illegal ZERO value for"
-                       " fabric_max_sectors\n", dev);
-               return -EINVAL;
-       }
-       if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-               pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
-                       " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
-                               DA_STATUS_MAX_SECTORS_MIN);
-               return -EINVAL;
-       }
-       if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-               pr_err("dev[%p]: Passed fabric_max_sectors: %u"
-                       " greater than DA_STATUS_MAX_SECTORS_MAX:"
-                       " %u\n", dev, fabric_max_sectors,
-                       DA_STATUS_MAX_SECTORS_MAX);
-               return -EINVAL;
-       }
-       /*
-        * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-        */
-       if (!block_size) {
-               block_size = 512;
-               pr_warn("Defaulting to 512 for zero block_size\n");
-       }
-       fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
-                                                     block_size);
-
-       dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
-       pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
-                       dev, fabric_max_sectors);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
-
 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 {
        if (dev->export_count) {
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                        dev, dev->export_count);
                return -EINVAL;
        }
-       if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+       if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
-                       " greater than fabric_max_sectors: %u\n", dev,
-                       optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+                       " greater than hw_max_sectors: %u\n", dev,
+                       optimal_sectors, dev->dev_attrib.hw_max_sectors);
                return -EINVAL;
        }
 
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        dev->dev_attrib.unmap_granularity_alignment =
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
-       dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
-       dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
 
        xcopy_lun = &dev->xcopy_lun;
        xcopy_lun->lun_se_dev = dev;
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
+       dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 
        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();
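With fabric_max_sectors gone, hw_max_sectors becomes the single transfer-length limit: it is aligned once at configure time and mirrored into optimal_sectors. A minimal sketch of the page alignment that se_dev_align_max_sectors() is assumed to apply (helper body reconstructed for illustration, not quoted from this series):

    /* assumes <linux/kernel.h> for max() and rounddown() */
    static u32 align_max_sectors(u32 max_sectors, u32 block_size)
    {
            /* How many logical blocks make up one page. */
            u32 alignment = max(1ul, PAGE_SIZE / block_size);

            /* Round the limit down so transfers stay page-aligned. */
            return rounddown(max_sectors, alignment);
    }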
index c2aea099ea4adf7c0ee60ac7949fa02089162932..d836de200a03bcf24be54004df89c7d6d5039030 100644 (file)
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        struct fd_prot fd_prot;
        sense_reason_t rc;
        int ret = 0;
-
+       /*
+        * We are currently limited by the number of iovecs (2048) per
+        * single vfs_[writev,readv] call.
+        */
+       if (cmd->data_length > FD_MAX_BYTES) {
+               pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+                      " FD_MAX_BYTES: %u iovec count limitation\n",
+                       cmd->data_length, FD_MAX_BYTES);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = {
        &fileio_dev_attrib_hw_block_size.attr,
        &fileio_dev_attrib_block_size.attr,
        &fileio_dev_attrib_hw_max_sectors.attr,
-       &fileio_dev_attrib_fabric_max_sectors.attr,
        &fileio_dev_attrib_optimal_sectors.attr,
        &fileio_dev_attrib_hw_queue_depth.attr,
        &fileio_dev_attrib_queue_depth.attr,
index 3efff94fbd9788838f565218965d180b78a67604..78346b850968ed8da28d88f35cf6a3ac15512a1b 100644 (file)
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev)
        q = bdev_get_queue(bd);
 
        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
-       dev->dev_attrib.hw_max_sectors = UINT_MAX;
+       dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
        /*
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = {
        &iblock_dev_attrib_hw_block_size.attr,
        &iblock_dev_attrib_block_size.attr,
        &iblock_dev_attrib_hw_max_sectors.attr,
-       &iblock_dev_attrib_fabric_max_sectors.attr,
        &iblock_dev_attrib_optimal_sectors.attr,
        &iblock_dev_attrib_hw_queue_depth.attr,
        &iblock_dev_attrib_queue_depth.attr,
index d56f2aaba9af9a6bb4b89d5c1080d426cba84e63..283cf786ef98be3d0594e847cc9749a072986b80 100644 (file)
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
 
                        return 0;
                }
+       } else if (we && registered_nexus) {
+               /*
+                * Reads are allowed for Write Exclusive locks
+                * from all registrants.
+                */
+               if (cmd->data_direction == DMA_FROM_DEVICE) {
+                       pr_debug("Allowing READ CDB: 0x%02x for %s"
+                               " reservation\n", cdb[0],
+                               core_scsi3_pr_dump_type(pr_reg_type));
+
+                       return 0;
+               }
        }
        pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
                " for %s reservation\n", transport_dump_cmd_direction(cmd),
index 60ebd170a561943be8bde26cd40112bbd22d1e8e..98e83ac5661bcfe5b3b7b98d6c9fbf21bb27c14e 100644 (file)
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
        &rd_mcp_dev_attrib_hw_block_size.attr,
        &rd_mcp_dev_attrib_block_size.attr,
        &rd_mcp_dev_attrib_hw_max_sectors.attr,
-       &rd_mcp_dev_attrib_fabric_max_sectors.attr,
        &rd_mcp_dev_attrib_optimal_sectors.attr,
        &rd_mcp_dev_attrib_hw_queue_depth.attr,
        &rd_mcp_dev_attrib_queue_depth.attr,
index 11bea1952435a397172ce69804a1088567bde03f..cd4bed7b27579b14a0f6e517eed23dacb6c1fe02 100644 (file)
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 
        if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
                unsigned long long end_lba;
-
-               if (sectors > dev->dev_attrib.fabric_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds fabric_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.fabric_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
-               if (sectors > dev->dev_attrib.hw_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds backend hw_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.hw_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
 check_lba:
                end_lba = dev->transport->get_blocks(dev) + 1;
                if (cmd->t_task_lba + sectors > end_lba) {
index 1307600fe7264cb55234b6b8e88d8cc15878c799..4c71657da56ab3cdc96b5c1f8d784722f10e2c1d 100644 (file)
@@ -505,7 +505,6 @@ static sense_reason_t
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
-       u32 max_sectors;
        int have_tp = 0;
        int opt, min;
 
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
        /*
         * Set MAXIMUM TRANSFER LENGTH
         */
-       max_sectors = min(dev->dev_attrib.fabric_max_sectors,
-                         dev->dev_attrib.hw_max_sectors);
-       put_unaligned_be32(max_sectors, &buf[8]);
+       put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
 
        /*
         * Set OPTIMAL TRANSFER LENGTH
index 8bfa61c9693dbef6fe6267e16a48c566eae6ac4a..1a1bcf71ec9dbf3ed5ae05cbfadcf71a29a33f87 100644 (file)
@@ -784,9 +784,7 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
        if (ret < 0)
                goto free_skb;
 
-       ret = genlmsg_end(skb, msg_header);
-       if (ret < 0)
-               goto free_skb;
+       genlmsg_end(skb, msg_header);
 
        ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
                                TCMU_MCGRP_CONFIG, GFP_KERNEL);
@@ -1118,7 +1116,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
        &tcmu_dev_attrib_hw_block_size.attr,
        &tcmu_dev_attrib_block_size.attr,
        &tcmu_dev_attrib_hw_max_sectors.attr,
-       &tcmu_dev_attrib_fabric_max_sectors.attr,
        &tcmu_dev_attrib_optimal_sectors.attr,
        &tcmu_dev_attrib_hw_queue_depth.attr,
        &tcmu_dev_attrib_queue_depth.attr,
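This hunk (and the matching one in thermal_core.c further down) drops the return-value check because genlmsg_end() stopped reporting errors in this same development cycle; finalizing a message can no longer fail. A sketch of the resulting send path, with placeholder family and group arguments:

    static int send_config_event(struct genl_family *family,
                                 struct sk_buff *skb, void *msg_header)
    {
            genlmsg_end(skb, msg_header);   /* just closes the message */
            return genlmsg_multicast(family, skb, 0, 0 /* group */,
                                     GFP_KERNEL);
    }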
index c1188ac053c9650ce163a3e9e37f34a877959dc2..2ccbc0788353e9488e3f730de80d52ecafaf9082 100644 (file)
@@ -608,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev)
        regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
        regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
        data->mode = THERMAL_DEVICE_DISABLED;
+       clk_disable_unprepare(data->thermal_clk);
 
        return 0;
 }
@@ -617,6 +618,7 @@ static int imx_thermal_resume(struct device *dev)
        struct imx_thermal_data *data = dev_get_drvdata(dev);
        struct regmap *map = data->tempmon;
 
+       clk_prepare_enable(data->thermal_clk);
        /* Enable thermal sensor after resume */
        regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
        regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
index 231cabc16e160e7318b4339a9d3d5210ff8b2ec3..2c2ec7666eb182c44c24225609ab9ecd723ea04a 100644 (file)
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
                        continue;
 
                result = acpi_bus_get_device(trt->source, &adev);
-               if (!result)
-                       acpi_create_platform_device(adev);
-               else
+               if (result)
                        pr_warn("Failed to get source ACPI device\n");
 
                result = acpi_bus_get_device(trt->target, &adev);
-               if (!result)
-                       acpi_create_platform_device(adev);
-               else
+               if (result)
                        pr_warn("Failed to get target ACPI device\n");
        }
 
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
 
                if (art->source) {
                        result = acpi_bus_get_device(art->source, &adev);
-                       if (!result)
-                               acpi_create_platform_device(adev);
-                       else
+                       if (result)
                                pr_warn("Failed to get source ACPI device\n");
                }
                if (art->target) {
                        result = acpi_bus_get_device(art->target, &adev);
-                       if (!result)
-                               acpi_create_platform_device(adev);
-                       else
+                       if (result)
                                pr_warn("Failed to get source ACPI device\n");
                }
        }
index 31bb553aac2633e6ca96ad6c115fe409c610bc35..0fe5dbbea9687053b835ee12eae553930b5ce1f9 100644 (file)
@@ -130,6 +130,8 @@ static int proc_thermal_add(struct device *dev,
        int ret;
 
        adev = ACPI_COMPANION(dev);
+       if (!adev)
+               return -ENODEV;
 
        status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
        if (ACPI_FAILURE(status))
index e145b66df444e65bb5cc4f9d7e7ed4dce7f76435..d717f3dab6f1410fc955daefb0497c2096298b56 100644 (file)
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
  *
  * Return: pointer to trip points table, NULL otherwise
  */
-const struct thermal_trip * const
+const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *tz)
 {
        struct __thermal_zone *data = tz->devdata;
index 8803e693fe6868a620b76abda347e0e27645d6d3..2580a4872f90febeb5af00136e16054bb59e4903 100644 (file)
@@ -63,7 +63,7 @@ struct rcar_thermal_priv {
        struct mutex lock;
        struct list_head list;
        int id;
-       int ctemp;
+       u32 ctemp;
 };
 
 #define rcar_thermal_for_each_priv(pos, common)        \
@@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
 {
        struct device *dev = rcar_priv_to_dev(priv);
        int i;
-       int ctemp, old, new;
+       u32 ctemp, old, new;
        int ret = -EINVAL;
 
        mutex_lock(&priv->lock);
@@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
        int i;
        int ret = -ENODEV;
        int idle = IDLE_INTERVAL;
+       u32 enr_bits = 0;
 
        common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
        if (!common)
@@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 
                /*
                 * platform has IRQ support.
-                * Then, drier use common register
+                * Then, the driver uses the common registers.
                 */
 
                ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
@@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                if (IS_ERR(common->base))
                        return PTR_ERR(common->base);
 
-               /* enable temperature comparation */
-               rcar_thermal_common_write(common, ENR, 0x00030303);
-
                idle = 0; /* polling delay is not needed */
        }
 
@@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                        rcar_thermal_irq_enable(priv);
 
                list_move_tail(&priv->list, &common->head);
+
+               /* update ENR bits */
+               enr_bits |= 3 << (i * 8);
        }
 
+       /* enable temperature comparison */
+       if (irq)
+               rcar_thermal_common_write(common, ENR, enr_bits);
+
        platform_set_drvdata(pdev, common);
 
        dev_info(dev, "%d sensor probed\n", i);
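The probe path above replaces the hardcoded ENR write (0x00030303) with per-sensor accumulation, so comparators are enabled only for sensors that actually probed, and only on IRQ-capable platforms. With the byte-per-sensor layout implied by the shift, the accumulation works out like this:

    u32 enr_bits = 0;
    int i;

    /* Bits 0-1 of each sensor's byte enable its comparators. */
    for (i = 0; i < 3; i++)                 /* e.g. three probed sensors */
            enr_bits |= 3 << (i * 8);

    /* enr_bits == 0x00030303: identical to the old unconditional write */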
index 87e0b0782023cb37696a92150d9f0c10bd09b198..48491d1a81d650f1d10812c1fb8d5e05ff99cbe1 100644 (file)
@@ -1759,11 +1759,7 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz,
        thermal_event->event = event;
 
        /* send multicast genetlink message */
-       result = genlmsg_end(skb, msg_header);
-       if (result < 0) {
-               nlmsg_free(skb);
-               return result;
-       }
+       genlmsg_end(skb, msg_header);
 
        result = genlmsg_multicast(&thermal_event_genl_family, skb, 0,
                                   0, GFP_ATOMIC);
index 9083e75206236c1953e4ad5d7858fbc5f164fad0..0531c752fbbb6680c40e939ad2a14fdc1830f357 100644 (file)
@@ -91,7 +91,7 @@ int of_parse_thermal_zones(void);
 void of_thermal_destroy_zones(void);
 int of_thermal_get_ntrips(struct thermal_zone_device *);
 bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
-const struct thermal_trip * const
+const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *);
 #else
 static inline int of_parse_thermal_zones(void) { return 0; }
@@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
 {
        return 0;
 }
-static inline const struct thermal_trip * const
+static inline const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *tz)
 {
        return NULL;
index d2b496750d590c1e06d755b0b563f40c30f8d165..4ddfa60c922205513d16ed74a770eefd111fda83 100644 (file)
@@ -2399,17 +2399,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
 
        poll_wait(file, &tty->read_wait, wait);
        poll_wait(file, &tty->write_wait, wait);
-       if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
-               mask |= POLLHUP;
        if (input_available_p(tty, 1))
                mask |= POLLIN | POLLRDNORM;
-       else if (mask & POLLHUP) {
-               tty_flush_to_ldisc(tty);
-               if (input_available_p(tty, 1))
-                       mask |= POLLIN | POLLRDNORM;
-       }
        if (tty->packet && tty->link->ctrl_status)
                mask |= POLLPRI | POLLIN | POLLRDNORM;
+       if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+               mask |= POLLHUP;
        if (tty_hung_up_p(file))
                mask |= POLLHUP;
        if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
index 31feeb2d0a6688513e1dccf82521f281d9842b03..d1f8dc6aabcbe5bca9b6b6bbfc14e6f3f75e60c5 100644 (file)
@@ -1815,7 +1815,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
 }
 
 static int
-pci_wch_ch382_setup(struct serial_private *priv,
+pci_wch_ch38x_setup(struct serial_private *priv,
                     const struct pciserial_board *board,
                     struct uart_8250_port *port, int idx)
 {
@@ -1880,6 +1880,7 @@ pci_wch_ch382_setup(struct serial_private *priv,
 
 #define PCIE_VENDOR_ID_WCH             0x1c00
 #define PCIE_DEVICE_ID_WCH_CH382_2S1P  0x3250
+#define PCIE_DEVICE_ID_WCH_CH384_4S    0x3470
 
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584        0x1584
@@ -2571,13 +2572,21 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = pci_wch_ch353_setup,
        },
-       /* WCH CH382 2S1P card (16750 clone) */
+       /* WCH CH382 2S1P card (16850 clone) */
        {
                .vendor         = PCIE_VENDOR_ID_WCH,
                .device         = PCIE_DEVICE_ID_WCH_CH382_2S1P,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
-               .setup          = pci_wch_ch382_setup,
+               .setup          = pci_wch_ch38x_setup,
+       },
+       /* WCH CH384 4S card (16850 clone) */
+       {
+               .vendor         = PCIE_VENDOR_ID_WCH,
+               .device         = PCIE_DEVICE_ID_WCH_CH384_4S,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_wch_ch38x_setup,
        },
        /*
         * ASIX devices with FIFO bug
@@ -2876,6 +2885,7 @@ enum pci_board_num_t {
        pbn_fintek_4,
        pbn_fintek_8,
        pbn_fintek_12,
+       pbn_wch384_4,
 };
 
 /*
@@ -3675,6 +3685,14 @@ static struct pciserial_board pci_boards[] = {
                .base_baud      = 115200,
                .first_offset   = 0x40,
        },
+
+       [pbn_wch384_4] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 4,
+               .base_baud      = 115200,
+               .uart_offset    = 8,
+               .first_offset   = 0xC0,
+       },
 };
 
 static const struct pci_device_id blacklist[] = {
@@ -3687,6 +3705,7 @@ static const struct pci_device_id blacklist[] = {
        { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
        { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
        { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+       { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
 };
 
 /*
@@ -5400,6 +5419,10 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_b0_bt_2_115200 },
 
+       {       PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
+               PCI_ANY_ID, PCI_ANY_ID,
+               0, 0, pbn_wch384_4 },
+
        /*
         * Commtech, Inc. Fastcom adapters
         */
index 19273e31d22426071cc445e0c76655cc6394dbb2..107e807225752623c7f8cae56b17f00d4d003d37 100644 (file)
@@ -1757,32 +1757,43 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
 #endif
 
 #if defined(CONFIG_ARCH_EXYNOS)
+#define EXYNOS_COMMON_SERIAL_DRV_DATA                          \
+       .info = &(struct s3c24xx_uart_info) {                   \
+               .name           = "Samsung Exynos UART",        \
+               .type           = PORT_S3C6400,                 \
+               .has_divslot    = 1,                            \
+               .rx_fifomask    = S5PV210_UFSTAT_RXMASK,        \
+               .rx_fifoshift   = S5PV210_UFSTAT_RXSHIFT,       \
+               .rx_fifofull    = S5PV210_UFSTAT_RXFULL,        \
+               .tx_fifofull    = S5PV210_UFSTAT_TXFULL,        \
+               .tx_fifomask    = S5PV210_UFSTAT_TXMASK,        \
+               .tx_fifoshift   = S5PV210_UFSTAT_TXSHIFT,       \
+               .def_clk_sel    = S3C2410_UCON_CLKSEL0,         \
+               .num_clks       = 1,                            \
+               .clksel_mask    = 0,                            \
+               .clksel_shift   = 0,                            \
+       },                                                      \
+       .def_cfg = &(struct s3c2410_uartcfg) {                  \
+               .ucon           = S5PV210_UCON_DEFAULT,         \
+               .ufcon          = S5PV210_UFCON_DEFAULT,        \
+               .has_fracval    = 1,                            \
+       }                                                       \
+
 static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
-       .info = &(struct s3c24xx_uart_info) {
-               .name           = "Samsung Exynos4 UART",
-               .type           = PORT_S3C6400,
-               .has_divslot    = 1,
-               .rx_fifomask    = S5PV210_UFSTAT_RXMASK,
-               .rx_fifoshift   = S5PV210_UFSTAT_RXSHIFT,
-               .rx_fifofull    = S5PV210_UFSTAT_RXFULL,
-               .tx_fifofull    = S5PV210_UFSTAT_TXFULL,
-               .tx_fifomask    = S5PV210_UFSTAT_TXMASK,
-               .tx_fifoshift   = S5PV210_UFSTAT_TXSHIFT,
-               .def_clk_sel    = S3C2410_UCON_CLKSEL0,
-               .num_clks       = 1,
-               .clksel_mask    = 0,
-               .clksel_shift   = 0,
-       },
-       .def_cfg = &(struct s3c2410_uartcfg) {
-               .ucon           = S5PV210_UCON_DEFAULT,
-               .ufcon          = S5PV210_UFCON_DEFAULT,
-               .has_fracval    = 1,
-       },
+       EXYNOS_COMMON_SERIAL_DRV_DATA,
        .fifosize = { 256, 64, 16, 16 },
 };
+
+static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
+       EXYNOS_COMMON_SERIAL_DRV_DATA,
+       .fifosize = { 64, 256, 16, 256 },
+};
+
 #define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data)
+#define EXYNOS5433_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos5433_serial_drv_data)
 #else
 #define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#define EXYNOS5433_SERIAL_DRV_DATA (kernel_ulong_t)NULL
 #endif
 
 static struct platform_device_id s3c24xx_serial_driver_ids[] = {
@@ -1804,6 +1815,9 @@ static struct platform_device_id s3c24xx_serial_driver_ids[] = {
        }, {
                .name           = "exynos4210-uart",
                .driver_data    = EXYNOS4210_SERIAL_DRV_DATA,
+       }, {
+               .name           = "exynos5433-uart",
+               .driver_data    = EXYNOS5433_SERIAL_DRV_DATA,
        },
        { },
 };
@@ -1823,6 +1837,8 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
                .data = (void *)S5PV210_SERIAL_DRV_DATA },
        { .compatible = "samsung,exynos4210-uart",
                .data = (void *)EXYNOS4210_SERIAL_DRV_DATA },
+       { .compatible = "samsung,exynos5433-uart",
+               .data = (void *)EXYNOS5433_SERIAL_DRV_DATA },
        {},
 };
 MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
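With the shared fields factored into EXYNOS_COMMON_SERIAL_DRV_DATA, each further Exynos variant only has to state its FIFO geometry. A hypothetical additional entry (FIFO depths invented for illustration):

    static struct s3c24xx_serial_drv_data exynosNNNN_serial_drv_data = {
            EXYNOS_COMMON_SERIAL_DRV_DATA,
            .fifosize = { 64, 64, 16, 16 }, /* hypothetical per-port depths */
    };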
index 57ca61b14670f1a540c3f82a24328a390edcfb3c..984605bb5bf1d593087bfffe485323538144b2c5 100644 (file)
@@ -2164,7 +2164,9 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
                break;
        }
 
-       dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+       printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+              port->dev ? dev_name(port->dev) : "",
+              port->dev ? ": " : "",
               drv->dev_name,
               drv->tty_driver->name_base + port->line,
               address, port->irq, port->uartclk / 16, uart_type(port));
index 4f35b43e24759c5ab2ae5bcfd7a0fd49672ed3d8..51f066aa375e64789e03b9a527679e3a0a7e1c8d 100644 (file)
@@ -1464,6 +1464,9 @@ static int tty_reopen(struct tty_struct *tty)
            driver->subtype == PTY_TYPE_MASTER)
                return -EIO;
 
+       if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+               return -EBUSY;
+
        tty->count++;
 
        WARN_ON(!tty->ldisc);
@@ -2106,10 +2109,6 @@ retry_open:
                retval = -ENODEV;
        filp->f_flags = saved_flags;
 
-       if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
-                                               !capable(CAP_SYS_ADMIN))
-               retval = -EBUSY;
-
        if (retval) {
 #ifdef TTY_DEBUG_HANGUP
                printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
index 5b9825a4538a67740a3ccf0d1ceea28d08f5d5c5..a57dc8866fc5ff938641686f7945916074723fe8 100644 (file)
@@ -669,7 +669,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
        if (!ci)
                return -ENOMEM;
 
-       platform_set_drvdata(pdev, ci);
        ci->dev = dev;
        ci->platdata = dev_get_platdata(dev);
        ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -783,6 +782,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
                }
        }
 
+       platform_set_drvdata(pdev, ci);
        ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED,
                        ci->platdata->name, ci);
        if (ret)
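This hunk defers platform_set_drvdata() until *ci is fully initialized, immediately before the IRQ is requested: a shared handler may fire as soon as devm_request_irq() succeeds, and it reads the ci pointer back out of drvdata. The intended ordering, sketched with names from the surrounding driver:

    /* ... finish initializing *ci first ... */
    platform_set_drvdata(pdev, ci);                 /* publish only when ready */
    ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED,
                           ci->platdata->name, ci); /* handler may fire now */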
index c1694cff1eafd287c600435a45c861a4406e6514..48731d0bab357a75232fdbd7b4063fe29af3ad4e 100644 (file)
@@ -91,6 +91,7 @@ static int host_start(struct ci_hdrc *ci)
        if (!hcd)
                return -ENOMEM;
 
+       dev_set_drvdata(ci->dev, ci);
        hcd->rsrc_start = ci->hw_bank.phys;
        hcd->rsrc_len = ci->hw_bank.size;
        hcd->regs = ci->hw_bank.abs;
index de0c9c9d7091903af17a388a99ac7da963374a21..a6315abe7b7cec272f28265157f1e65a24f71ddb 100644 (file)
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
             le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
                return 0;
 
+       /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
+       if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
+            le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
+               return 1;
+
        /* NOTE: can't use usb_match_id() since interface caches
         * aren't set up yet. this is cut/paste from that code.
         */
index 0ffb4ed0a9451af2465beed78e0e30e560299f06..41e510ae8c837ea337135c4ec8ddbe26fccac275 100644 (file)
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
                        USB_QUIRK_IGNORE_REMOTE_WAKEUP },
 
+       /* Protocol and OTG Electrical Test Device */
+       { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+                       USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+
        { }  /* terminating entry must be last */
 };
 
index ad43c5bc1ef19eadfc074f42690183fcbcf37cef..02e3e2d4ea5658c0ddd6de8b1cb1729425a3c0ab 100644 (file)
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
        u32 gintsts;
        irqreturn_t retval = IRQ_NONE;
 
+       spin_lock(&hsotg->lock);
+
        if (!dwc2_is_controller_alive(hsotg)) {
                dev_warn(hsotg->dev, "Controller is dead\n");
                goto out;
        }
 
-       spin_lock(&hsotg->lock);
-
        gintsts = dwc2_read_common_intr(hsotg);
        if (gintsts & ~GINTSTS_PRTINT)
                retval = IRQ_HANDLED;
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
                }
        }
 
-       spin_unlock(&hsotg->lock);
 out:
+       spin_unlock(&hsotg->lock);
        return retval;
 }
 EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
index 200168ec2d7567e63ce9b8a8fa4afb0009b85831..79242008085bbed84a9e7caf142077215d89aab7 100644 (file)
@@ -2567,7 +2567,7 @@ error:
  * s3c_hsotg_ep_disable - disable given endpoint
  * @ep: The endpoint to disable.
  */
-static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force)
 {
        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
        struct dwc2_hsotg *hsotg = hs_ep->parent;
@@ -2588,7 +2588,7 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
 
        spin_lock_irqsave(&hsotg->lock, flags);
        /* terminate all requests with shutdown */
-       kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
+       kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, force);
 
        hsotg->fifo_map &= ~(1<<hs_ep->fifo_index);
        hs_ep->fifo_index = 0;
@@ -2609,6 +2609,10 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
        return 0;
 }
 
+static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+{
+       return s3c_hsotg_ep_disable_force(ep, false);
+}
 /**
  * on_list - check request is on the given endpoint
  * @ep: The endpoint to check.
@@ -2924,7 +2928,7 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
 
        /* all endpoints should be shutdown */
        for (ep = 1; ep < hsotg->num_of_eps; ep++)
-               s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+               s3c_hsotg_ep_disable_force(&hsotg->eps[ep].ep, true);
 
        spin_lock_irqsave(&hsotg->lock, flags);
 
index 7c4faf738747bdab2bf5a73e39297f8ae437aeb7..b642a2f998f9eaf8079b36e1b22612042e353288 100644 (file)
@@ -33,6 +33,8 @@
 #define PCI_DEVICE_ID_INTEL_BYT                0x0f37
 #define PCI_DEVICE_ID_INTEL_MRFLD      0x119e
 #define PCI_DEVICE_ID_INTEL_BSW                0x22B7
+#define PCI_DEVICE_ID_INTEL_SPTLP      0x9d30
+#define PCI_DEVICE_ID_INTEL_SPTH       0xa130
 
 struct dwc3_pci {
        struct device           *dev;
@@ -219,6 +221,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
 };
index f03b136ecfce33b3b6936314d1fdebabb4ce936d..8f65ab3a3b928f3872dcc42b1ddfef38d4ba5d26 100644 (file)
@@ -882,8 +882,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
 
                                if (i == (request->num_mapped_sgs - 1) ||
                                                sg_is_last(s)) {
-                                       if (list_is_last(&req->list,
-                                                       &dep->request_list))
+                                       if (list_empty(&dep->request_list))
                                                last_one = true;
                                        chain = false;
                                }
@@ -901,6 +900,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
                                if (last_one)
                                        break;
                        }
+
+                       if (last_one)
+                               break;
                } else {
                        dma = req->request.dma;
                        length = req->request.length;
index 6e04e302dc3a85b0dba95cc90c12e97323fa635c..a1bc3e3a0b09f740342e949024db337e030e9ed9 100644 (file)
@@ -399,8 +399,9 @@ static int hidg_setup(struct usb_function *f,
        value   = __le16_to_cpu(ctrl->wValue);
        length  = __le16_to_cpu(ctrl->wLength);
 
-       VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x "
-               "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value);
+       VDBG(cdev,
+            "%s ctrl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n",
+            __func__, ctrl->bRequestType, ctrl->bRequest, value);
 
        switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
        case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
index a90440300735fecaa9502c0234e7abbb81db860b..259b656c0b3ec7bde9e119488f46ded351bb7300 100644 (file)
@@ -520,7 +520,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
                req = midi_alloc_ep_req(ep, midi->buflen);
 
        if (!req) {
-               ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n");
+               ERROR(midi, "%s: alloc_ep_request failed\n", __func__);
                return;
        }
        req->length = 0;
index f7b20329320583d05c096882d8e04cec7d905097..e9715845f82e1dc825690c05f8cc08c2d8e41df7 100644 (file)
@@ -897,7 +897,6 @@ static void f_audio_free_inst(struct usb_function_instance *f)
        struct f_uac1_opts *opts;
 
        opts = container_of(f, struct f_uac1_opts, func_inst);
-       gaudio_cleanup(opts->card);
        if (opts->fn_play_alloc)
                kfree(opts->fn_play);
        if (opts->fn_cap_alloc)
@@ -935,6 +934,7 @@ static void f_audio_free(struct usb_function *f)
        struct f_audio *audio = func_to_audio(f);
        struct f_uac1_opts *opts;
 
+       gaudio_cleanup(&audio->card);
        opts = container_of(f->fi, struct f_uac1_opts, func_inst);
        kfree(audio);
        mutex_lock(&opts->lock);
index c744e4975d744c4fb710a429ec055616fffd4833..db49ec4c748e9469bd694645c8cfc22df7c829fa 100644 (file)
@@ -441,6 +441,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        kbuf = memdup_user(buf, len);
        if (IS_ERR(kbuf)) {
                value = PTR_ERR(kbuf);
+               kbuf = NULL;
                goto free1;
        }
 
@@ -449,6 +450,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                data->name, len, (int) value);
 free1:
        mutex_unlock(&data->lock);
+       kfree (kbuf);
        return value;
 }
 
index ce882371786b184d7b02f4a451d6c8c1b7a80efa..9f93bed42052cb5c910d5f34218287e58a558c6b 100644 (file)
@@ -716,10 +716,10 @@ static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
        req->using_dma = 1;
        req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
                        | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
-                       | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
+                       | USBA_DMA_END_BUF_EN;
 
-       if (ep->is_in)
-               req->ctrl |= USBA_DMA_END_BUF_EN;
+       if (!ep->is_in)
+               req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
 
        /*
         * Add this request to the queue and submit for DMA if
@@ -828,7 +828,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 {
        struct usba_ep *ep = to_usba_ep(_ep);
        struct usba_udc *udc = ep->udc;
-       struct usba_request *req = to_usba_req(_req);
+       struct usba_request *req;
        unsigned long flags;
        u32 status;
 
@@ -837,6 +837,16 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 
        spin_lock_irqsave(&udc->lock, flags);
 
+       list_for_each_entry(req, &ep->queue, queue) {
+               if (&req->req == _req)
+                       break;
+       }
+
+       if (&req->req != _req) {
+               spin_unlock_irqrestore(&udc->lock, flags);
+               return -EINVAL;
+       }
+
        if (req->using_dma) {
                /*
                 * If this request is currently being transferred,
@@ -1563,7 +1573,6 @@ static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
        if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
                DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
                receive_data(ep);
-               usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
        }
 }
 
index ff67ceac77c410a8f6a9ab27fc63f63afd9dc01b..d4fe8d769bd673c384707fae714944885061911d 100644 (file)
@@ -718,10 +718,11 @@ static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
        struct bdc *bdc;
        int ret = 0;
 
-       bdc = ep->bdc;
        if (!req || !ep || !ep->usb_ep.desc)
                return -EINVAL;
 
+       bdc = ep->bdc;
+
        req->usb_req.actual = 0;
        req->usb_req.status = -EINPROGRESS;
        req->epnum = ep->ep_num;
index e113fd73aeae7148b0cbcd0d424aebb16d84a694..f9a332775c4781e57faf7bd584d5d58cbf5397e3 100644 (file)
@@ -1581,6 +1581,10 @@ iso_stream_schedule (
        else
                next = (now + 2 + 7) & ~0x07;   /* full frame cache */
 
+       /* If needed, initialize last_iso_frame so that this URB will be seen */
+       if (ehci->isoc_count == 0)
+               ehci->last_iso_frame = now >> 3;
+
        /*
         * Use ehci->last_iso_frame as the base.  There can't be any
         * TDs scheduled for earlier than that.
@@ -1600,11 +1604,11 @@ iso_stream_schedule (
         */
        now2 = (now - base) & (mod - 1);
 
-       /* Is the schedule already full? */
+       /* Is the schedule about to wrap around? */
        if (unlikely(!empty && start < period)) {
-               ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
+               ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
                                urb, stream->next_uframe, base, period, mod);
-               status = -ENOSPC;
+               status = -EFBIG;
                goto fail;
        }
 
@@ -1671,10 +1675,6 @@ iso_stream_schedule (
        urb->start_frame = start & (mod - 1);
        if (!stream->highspeed)
                urb->start_frame >>= 3;
-
-       /* Make sure scan_isoc() sees these */
-       if (ehci->isoc_count == 0)
-               ehci->last_iso_frame = now >> 3;
        return status;
 
  fail:
index 19a9af1b4d749cd577e1215a200679f9a1c8b37d..ff9af29b4e9f6b0ea4bfa1decd8050fcbede061d 100644 (file)
@@ -451,7 +451,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
 
        u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
        if (IS_ERR(u_phy)) {
-               err = PTR_ERR(u_phy);
+               err = -EPROBE_DEFER;
                goto cleanup_clk_en;
        }
        hcd->usb_phy = u_phy;
index dd483c13565bb7bbedc561d6e39fccee1477c279..ce636466edb7a390efb346d3b0f511b9d4eb5404 100644 (file)
@@ -567,7 +567,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
 {
        void __iomem *base;
        u32 control;
-       u32 fminterval;
+       u32 fminterval = 0;
+       bool no_fminterval = false;
        int cnt;
 
        if (!mmio_resource_enabled(pdev, 0))
@@ -577,6 +578,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
        if (base == NULL)
                return;
 
+       /*
+        * ULi M5237 OHCI controller locks the whole system when accessing
+        * the OHCI_FMINTERVAL offset.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
+               no_fminterval = true;
+
        control = readl(base + OHCI_CONTROL);
 
 /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
@@ -615,7 +623,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
        }
 
        /* software reset of the controller, preserving HcFmInterval */
-       fminterval = readl(base + OHCI_FMINTERVAL);
+       if (!no_fminterval)
+               fminterval = readl(base + OHCI_FMINTERVAL);
+
        writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 
        /* reset requires max 10 us delay */
@@ -624,7 +634,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
                        break;
                udelay(1);
        }
-       writel(fminterval, base + OHCI_FMINTERVAL);
+
+       if (!no_fminterval)
+               writel(fminterval, base + OHCI_FMINTERVAL);
 
        /* Now the controller is safely in SUSPEND and nothing can wake it up */
        iounmap(base);
index 142b601f95636fdff622bca8c4fb1a9aef87093b..7f76c8a12f89db425e19c4f3a2a5200de542dbd7 100644 (file)
@@ -82,6 +82,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                                "must be suspended extra slowly",
                                pdev->revision);
                }
+               if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
+                       xhci->quirks |= XHCI_BROKEN_STREAMS;
                /* Fresco Logic confirms: all revisions of this chip do not
                 * support MSI, even though some of them claim to in their PCI
                 * capabilities.
index 01fcbb5eb06e7ec3d03bcd81d90941f889c5654a..c50d8d202618521793b37a8bd57d7b343cd757fd 100644 (file)
@@ -3803,6 +3803,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                return -EINVAL;
        }
 
+       if (setup == SETUP_CONTEXT_ONLY) {
+               slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+               if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+                   SLOT_STATE_DEFAULT) {
+                       xhci_dbg(xhci, "Slot already in default state\n");
+                       return 0;
+               }
+       }
+
        command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
        if (!command)
                return -ENOMEM;
index 9d68372dd9aaa01d72b3a192959b3bccc2b2de93..b005010240e5b30df0823d220664f5829855ac81 100644 (file)
@@ -72,6 +72,8 @@ config USB_MUSB_DA8XX
 
 config USB_MUSB_TUSB6010
        tristate "TUSB6010"
+       depends on ARCH_OMAP2PLUS || COMPILE_TEST
+       depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
 
 config USB_MUSB_OMAP2PLUS
        tristate "OMAP2430 and onwards"
@@ -85,6 +87,7 @@ config USB_MUSB_AM35X
 config USB_MUSB_DSPS
        tristate "TI DSPS platforms"
        select USB_MUSB_AM335X_CHILD
+       depends on ARCH_OMAP2PLUS || COMPILE_TEST
        depends on OF_IRQ
 
 config USB_MUSB_BLACKFIN
@@ -93,6 +96,7 @@ config USB_MUSB_BLACKFIN
 
 config USB_MUSB_UX500
        tristate "Ux500 platforms"
+       depends on ARCH_U8500 || COMPILE_TEST
 
 config USB_MUSB_JZ4740
        tristate "JZ4740"
index a441a2de8619e51d5f639e47332f3ec120d2a4f2..1782501456139aeb8f970fa51311a215a909d555 100644 (file)
@@ -63,7 +63,7 @@ static void bfin_writew(void __iomem *addr, unsigned offset, u16 data)
        bfin_write16(addr + offset, data);
 }
 
-static void binf_writel(void __iomem *addr, unsigned offset, u32 data)
+static void bfin_writel(void __iomem *addr, unsigned offset, u32 data)
 {
        bfin_write16(addr + offset, (u16)data);
 }
index f64fd964dc6d544b0fecee86a1fd9bd85993862a..c39a16ad78329194e78135464283dea4e760a4cc 100644 (file)
@@ -628,9 +628,9 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
                ret = of_property_read_string_index(np, "dma-names", i, &str);
                if (ret)
                        goto err;
-               if (!strncmp(str, "tx", 2))
+               if (strstarts(str, "tx"))
                        is_tx = 1;
-               else if (!strncmp(str, "rx", 2))
+               else if (strstarts(str, "rx"))
                        is_tx = 0;
                else {
                        dev_err(dev, "Wrong dmatype %s\n", str);
index ad3701a9738964d5f7846e76ec69a31660badda1..48131aa8472cfef70b19d6a2a72db0a8d6b2db85 100644 (file)
@@ -59,20 +59,12 @@ static const struct musb_register_map musb_regmap[] = {
        { "RxMaxPp",    MUSB_RXMAXP,    16 },
        { "RxCSR",      MUSB_RXCSR,     16 },
        { "RxCount",    MUSB_RXCOUNT,   16 },
-       { "ConfigData", MUSB_CONFIGDATA,8 },
        { "IntrRxE",    MUSB_INTRRXE,   16 },
        { "IntrTxE",    MUSB_INTRTXE,   16 },
        { "IntrUsbE",   MUSB_INTRUSBE,  8 },
        { "DevCtl",     MUSB_DEVCTL,    8 },
-       { "BabbleCtl",  MUSB_BABBLE_CTL,8 },
-       { "TxFIFOsz",   MUSB_TXFIFOSZ,  8 },
-       { "RxFIFOsz",   MUSB_RXFIFOSZ,  8 },
-       { "TxFIFOadd",  MUSB_TXFIFOADD, 16 },
-       { "RxFIFOadd",  MUSB_RXFIFOADD, 16 },
        { "VControl",   0x68,           32 },
        { "HWVers",     0x69,           16 },
-       { "EPInfo",     MUSB_EPINFO,    8 },
-       { "RAMInfo",    MUSB_RAMINFO,   8 },
        { "LinkInfo",   MUSB_LINKINFO,  8 },
        { "VPLen",      MUSB_VPLEN,     8 },
        { "HS_EOF1",    MUSB_HS_EOF1,   8 },
@@ -103,6 +95,16 @@ static const struct musb_register_map musb_regmap[] = {
        { "DMA_CNTLch7",        0x274,  16 },
        { "DMA_ADDRch7",        0x278,  32 },
        { "DMA_COUNTch7",       0x27C,  32 },
+#ifndef CONFIG_BLACKFIN
+       { "ConfigData", MUSB_CONFIGDATA,8 },
+       { "BabbleCtl",  MUSB_BABBLE_CTL,8 },
+       { "TxFIFOsz",   MUSB_TXFIFOSZ,  8 },
+       { "RxFIFOsz",   MUSB_RXFIFOSZ,  8 },
+       { "TxFIFOadd",  MUSB_TXFIFOADD, 16 },
+       { "RxFIFOadd",  MUSB_RXFIFOADD, 16 },
+       { "EPInfo",     MUSB_EPINFO,    8 },
+       { "RAMInfo",    MUSB_RAMINFO,   8 },
+#endif
        {  }    /* Terminating Entry */
 };
 
@@ -197,30 +199,30 @@ static ssize_t musb_test_mode_write(struct file *file,
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
 
-       if (!strncmp(buf, "force host", 9))
+       if (strstarts(buf, "force host"))
                test = MUSB_TEST_FORCE_HOST;
 
-       if (!strncmp(buf, "fifo access", 11))
+       if (strstarts(buf, "fifo access"))
                test = MUSB_TEST_FIFO_ACCESS;
 
-       if (!strncmp(buf, "force full-speed", 15))
+       if (strstarts(buf, "force full-speed"))
                test = MUSB_TEST_FORCE_FS;
 
-       if (!strncmp(buf, "force high-speed", 15))
+       if (strstarts(buf, "force high-speed"))
                test = MUSB_TEST_FORCE_HS;
 
-       if (!strncmp(buf, "test packet", 10)) {
+       if (strstarts(buf, "test packet")) {
                test = MUSB_TEST_PACKET;
                musb_load_testpacket(musb);
        }
 
-       if (!strncmp(buf, "test K", 6))
+       if (strstarts(buf, "test K"))
                test = MUSB_TEST_K;
 
-       if (!strncmp(buf, "test J", 6))
+       if (strstarts(buf, "test J"))
                test = MUSB_TEST_J;
 
-       if (!strncmp(buf, "test SE0 NAK", 12))
+       if (strstarts(buf, "test SE0 NAK"))
                test = MUSB_TEST_SE0_NAK;
 
        musb_writeb(musb->mregs, MUSB_TESTMODE, test);
index 23d474d3d7f466188bcf4cee8c8f974e4274d22f..883a9adfdfff5f0c1643036e0be7d7d22d1e73a8 100644 (file)
@@ -2663,7 +2663,6 @@ void musb_host_cleanup(struct musb *musb)
        if (musb->port_mode == MUSB_PORT_MODE_GADGET)
                return;
        usb_remove_hcd(musb->hcd);
-       musb->hcd = NULL;
 }
 
 void musb_host_free(struct musb *musb)
index 699e38c73d82c2ae76feddfe07f85a54ca76dd73..697a741a0cb1ed36ff336af31d8c3cd7d2fd32a5 100644 (file)
@@ -338,7 +338,6 @@ static void mv_otg_update_inputs(struct mv_otg *mvotg)
 static void mv_otg_update_state(struct mv_otg *mvotg)
 {
        struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
-       struct usb_phy *phy = &mvotg->phy;
        int old_state = mvotg->phy.otg->state;
 
        switch (old_state) {
@@ -858,10 +857,10 @@ static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
 {
        struct mv_otg *mvotg = platform_get_drvdata(pdev);
 
-       if (mvotg->phy.state != OTG_STATE_B_IDLE) {
+       if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) {
                dev_info(&pdev->dev,
                         "OTG state is not B_IDLE, it is %d!\n",
-                        mvotg->phy.state);
+                        mvotg->phy.otg->state);
                return -EAGAIN;
        }
 
index b4066a001ba01573f9546749b7d4978180c48dc5..2f9735b3533891c85dc907d8b1cc95a1e50d2559 100644 (file)
@@ -59,6 +59,9 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
 {
        struct usb_phy  *phy;
 
+       if (!of_device_is_available(node))
+               return ERR_PTR(-ENODEV);
+
        list_for_each_entry(phy, &phy_list, head) {
                if (node != phy->dev->of_node)
                        continue;
@@ -66,7 +69,7 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
                return phy;
        }
 
-       return ERR_PTR(-ENODEV);
+       return ERR_PTR(-EPROBE_DEFER);
 }
 
 static void devm_usb_phy_release(struct device *dev, void *res)
@@ -190,10 +193,13 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
        spin_lock_irqsave(&phy_lock, flags);
 
        phy = __of_usb_find_phy(node);
-       if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
-               if (!IS_ERR(phy))
-                       phy = ERR_PTR(-EPROBE_DEFER);
+       if (IS_ERR(phy)) {
+               devres_free(ptr);
+               goto err1;
+       }
 
+       if (!try_module_get(phy->dev->driver->owner)) {
+               phy = ERR_PTR(-ENODEV);
                devres_free(ptr);
                goto err1;
        }
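The phy lookup now distinguishes two failure modes: a device-tree node whose status is "disabled" can never produce a phy, so it fails fast with -ENODEV, while an enabled node whose phy driver simply has not registered yet returns -EPROBE_DEFER so the consumer retries later. The contract, restated as a sketch with why-comments:

    if (!of_device_is_available(node))
            return ERR_PTR(-ENODEV);        /* node disabled: will never appear */

    /* ... scan the list of registered phys for a matching of_node ... */

    return ERR_PTR(-EPROBE_DEFER);          /* enabled but not registered yet */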
index 8d7fc48b1f307efffa1e5ec40a6e3250b852a54c..29fa1c3d0089bee738ed4f54a8b65d4f82dd0c03 100644 (file)
@@ -46,6 +46,8 @@ static struct console usbcons;
  * ------------------------------------------------------------
  */
 
+static const struct tty_operations usb_console_fake_tty_ops = {
+};
 
 /*
  * The parsing of the command line works exactly like the
@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
                                goto reset_open_count;
                        }
                        kref_init(&tty->kref);
-                       tty_port_tty_set(&port->port, tty);
                        tty->driver = usb_serial_tty_driver;
                        tty->index = co->index;
+                       init_ldsem(&tty->ldisc_sem);
+                       INIT_LIST_HEAD(&tty->tty_files);
+                       kref_get(&tty->driver->kref);
+                       tty->ops = &usb_console_fake_tty_ops;
                        if (tty_init_termios(tty)) {
                                retval = -ENOMEM;
-                               goto free_tty;
+                               goto put_tty;
                        }
+                       tty_port_tty_set(&port->port, tty);
                }
 
                /* only call the device specific open if this
@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
                        serial->type->set_termios(tty, port, &dummy);
 
                        tty_port_tty_set(&port->port, NULL);
-                       kfree(tty);
+                       tty_kref_put(tty);
                }
                set_bit(ASYNCB_INITIALIZED, &port->port.flags);
        }
@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
 
  fail:
        tty_port_tty_set(&port->port, NULL);
- free_tty:
-       kfree(tty);
+ put_tty:
+       tty_kref_put(tty);
  reset_open_count:
        port->port.count = 0;
        usb_autopm_put_interface(serial->interface);
index 6c4eb3cf5efd599653641e5d96d20b05610a6ed5..f4c56fc1a9f64dd32fae247c351c1b34e0d6400e 100644 (file)
@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
        { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
        { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
-       { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
+       { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
+       { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+       { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
index 1bd192290b08df0fb697fa91f2517b6756680012..ccf1df7c4b80f3f7a596fa3d1d8904eb64a78db6 100644 (file)
@@ -286,7 +286,7 @@ static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
 
        res = usb_submit_urb(port->read_urbs[index], mem_flags);
        if (res) {
-               if (res != -EPERM) {
+               if (res != -EPERM && res != -ENODEV) {
                        dev_err(&port->dev,
                                        "%s - usb_submit_urb failed: %d\n",
                                        __func__, res);
@@ -373,7 +373,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
                                                        __func__, urb->status);
                return;
        default:
-               dev_err(&port->dev, "%s - nonzero urb status: %d\n",
+               dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
                                                        __func__, urb->status);
                goto resubmit;
        }
index 077c714f1285171ee3b9e4c418e0df42f60cd42c..e07b15ed58148698d939370ac8bf73b20141669a 100644 (file)
@@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb)
        }
        port = serial->port[msg->port];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb)
        }
        port = serial->port[msg->port];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
                /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb)
        }
        port = serial->port[msg->portNumber];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb)
 
        port = serial->port[0];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb)
 
        port = serial->port[msg->port];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
index 7a4c21b4f67613f7bf64839b4cf4faf09804355e..efdcee15b52030e455ce3c6e17d401d4a5e2660d 100644 (file)
@@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb);
 
 #define QUALCOMM_VENDOR_ID                     0x05C6
 
+#define SIERRA_VENDOR_ID                       0x1199
+
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
 #define CMOTECH_PRODUCT_CMU_300                        0x6002
@@ -512,7 +514,7 @@ enum option_blacklist_reason {
                OPTION_BLACKLIST_RESERVED_IF = 2
 };
 
-#define MAX_BL_NUM  8
+#define MAX_BL_NUM  11
 struct option_blacklist_info {
        /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
        const unsigned long sendsetup;
@@ -601,6 +603,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
        .reserved = BIT(1) | BIT(5),
 };
 
+static const struct option_blacklist_info sierra_mc73xx_blacklist = {
+       .sendsetup = BIT(0) | BIT(2),
+       .reserved = BIT(8) | BIT(10) | BIT(11),
+};
+
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1098,6 +1105,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+       { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
+         .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
index cb3e14780a7e0c6182e5f9bf3ad505b61fa75684..9c63897b3a564012ea63f99b9e5e73bc48b93d36 100644 (file)
@@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x0f3d, 0x68a2)},   /* Sierra Wireless MC7700 */
        {DEVICE_SWI(0x114f, 0x68a2)},   /* Sierra Wireless MC7750 */
        {DEVICE_SWI(0x1199, 0x68a2)},   /* Sierra Wireless MC7710 */
-       {DEVICE_SWI(0x1199, 0x68c0)},   /* Sierra Wireless MC73xx */
        {DEVICE_SWI(0x1199, 0x901c)},   /* Sierra Wireless EM7700 */
        {DEVICE_SWI(0x1199, 0x901f)},   /* Sierra Wireless EM7355 */
        {DEVICE_SWI(0x1199, 0x9040)},   /* Sierra Wireless Modem */
index 8a6f371ed6e77e3ccdc99632c3cd41ebeb155213..9893d696fc973e9e4183b57b56b3ceb22570942f 100644 (file)
@@ -69,16 +69,39 @@ static int uas_use_uas_driver(struct usb_interface *intf,
                return 0;
 
        /*
-        * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is
-        * broken on the ASM1051, use the number of streams to differentiate.
-        * New ASM1053-s also support 32 streams, but have a different prod-id.
+        * ASMedia has a number of usb3 to sata bridge chips, at the time of
+        * this writing the following versions exist:
+        * ASM1051 - no uas support version
+        * ASM1051 - with broken (*) uas support
+        * ASM1053 - with working uas support
+        * ASM1153 - with working uas support
+        *
+        * Devices with these chips re-use a number of device-ids over the
+        * entire line, so the device-id is useless to determine if we're
+        * dealing with an ASM1051 (which we want to avoid).
+        *
+        * The ASM1153 can be identified by config.MaxPower == 0,
+        * where as the ASM105x models have config.MaxPower == 36.
+        *
+        * Differentiating between the ASM1053 and ASM1051 is trickier, when
+        * connected over USB-3 we can look at the number of streams supported,
+        * ASM1051 supports 32 streams, where as early ASM1053 versions support
+        * 16 streams, newer ASM1053-s also support 32 streams, but have a
+        * different prod-id.
+        *
+        * (*) ASM1051 chips do work with UAS with some disks (with the
+        *     US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks
         */
        if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
-                       le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) {
-               if (udev->speed < USB_SPEED_SUPER) {
+                       (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 ||
+                        le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) {
+               if (udev->actconfig->desc.bMaxPower == 0) {
+                       /* ASM1153, do nothing */
+               } else if (udev->speed < USB_SPEED_SUPER) {
                        /* No streams info, assume ASM1051 */
                        flags |= US_FL_IGNORE_UAS;
                } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+                       /* Possibly an ASM1051, disable uas */
                        flags |= US_FL_IGNORE_UAS;
                }
        }
index 11c7a96764415c4c299c77ddd4a2bc7b0b911967..d684b4b8108ff34a5c4023088d9e927dcb378f6b 100644 (file)
@@ -507,7 +507,7 @@ UNUSUAL_DEV(  0x04e6, 0x000c, 0x0100, 0x0100,
 UNUSUAL_DEV(  0x04e6, 0x000f, 0x0000, 0x9999,
                "SCM Microsystems",
                "eUSB SCSI Adapter (Bus Powered)",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
                US_FL_SCM_MULT_TARG ),
 
 UNUSUAL_DEV(  0x04e6, 0x0101, 0x0200, 0x0200,
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV(  0x152d, 0x2329, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
 
+/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
+UNUSUAL_DEV(  0x152d, 0x2566, 0x0114, 0x0114,
+               "JMicron",
+               "USB to ATA/ATAPI Bridge",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BROKEN_FUA ),
+
 /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
  * and Mac USB Dock USB-SCSI */
 UNUSUAL_DEV(  0x1645, 0x0007, 0x0100, 0x0133,
index 18a283d6de1c8bd18663b57bbf7499510c49fa2d..dbc00e56c7f5c106a67028e1b6da7dfee2c39e5e 100644 (file)
  * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
  */
 
+/*
+ * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+ * commands in UAS mode.  Observed with the 1.28 firmware; are there others?
+ */
+UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
+               "Apricorn",
+               "",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
+
 /* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
 UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999,
                "Seagate",
@@ -68,6 +78,20 @@ UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */
+UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999,
+               "Seagate",
+               "Backup Plus",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999,
+               "Seagate",
+               "Backup Plus Desk",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /* https://bbs.archlinux.org/viewtopic.php?id=183190 */
 UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
                "Seagate",
@@ -82,6 +106,13 @@ UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: G. Richard Bellamy <rbellamy@pteradigm.com> */
+UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+               "Seagate",
+               "BUP Fast HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
                "JMicron",
@@ -89,14 +120,6 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_REPORT_OPCODES),
 
-/* Most ASM1051 based devices have issues with uas, blacklist them all */
-/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
-UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
-               "ASMedia",
-               "ASM1051",
-               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-               US_FL_IGNORE_UAS),
-
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
                "VIA",
@@ -104,9 +127,23 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
+               "JMicron",
+               "JMS566",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
                "Hitachi",
                "External HDD",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_UAS),
+
+/* Reported-by: Richard Henderson <rth@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+               "SimpleTech",
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
index 255201f22126aabd9ea9178fe353cf5f42a82c68..7cc0122a18cecbb7ef45cf8e438112ec2fb4ff00 100644 (file)
@@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
 
 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-       u8 type;
        struct vfio_pci_device *vdev;
        struct iommu_group *group;
        int ret;
 
-       pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
-       if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
+       if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return -EINVAL;
 
        group = iommu_group_get(&pdev->dev);
index 14419a8ccbb6b138aa8bd38b7765166c1f4aa398..8dccca9013ed3fae55f1a716f85e1785f277a3f4 100644 (file)
@@ -84,10 +84,6 @@ struct vhost_net_ubuf_ref {
 
 struct vhost_net_virtqueue {
        struct vhost_virtqueue vq;
-       /* hdr is used to store the virtio header.
-        * Since each iovec has >= 1 byte length, we never need more than
-        * header length entries to store the header. */
-       struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
        size_t vhost_hlen;
        size_t sock_hlen;
        /* vhost zerocopy support fields below: */
@@ -235,44 +231,6 @@ static bool vhost_sock_zcopy(struct socket *sock)
                sock_flag(sock->sk, SOCK_ZEROCOPY);
 }
 
-/* Pop first len bytes from iovec. Return number of segments used. */
-static int move_iovec_hdr(struct iovec *from, struct iovec *to,
-                         size_t len, int iov_count)
-{
-       int seg = 0;
-       size_t size;
-
-       while (len && seg < iov_count) {
-               size = min(from->iov_len, len);
-               to->iov_base = from->iov_base;
-               to->iov_len = size;
-               from->iov_len -= size;
-               from->iov_base += size;
-               len -= size;
-               ++from;
-               ++to;
-               ++seg;
-       }
-       return seg;
-}
-/* Copy iovec entries for len bytes from iovec. */
-static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
-                          size_t len, int iovcount)
-{
-       int seg = 0;
-       size_t size;
-
-       while (len && seg < iovcount) {
-               size = min(from->iov_len, len);
-               to->iov_base = from->iov_base;
-               to->iov_len = size;
-               len -= size;
-               ++from;
-               ++to;
-               ++seg;
-       }
-}
-
 /* In case of DMA done not in order in lower device driver for some reason.
  * upend_idx is used to track end of used idx, done_idx is used to track head
  * of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -336,7 +294,7 @@ static void handle_tx(struct vhost_net *net)
 {
        struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
        struct vhost_virtqueue *vq = &nvq->vq;
-       unsigned out, in, s;
+       unsigned out, in;
        int head;
        struct msghdr msg = {
                .msg_name = NULL,
@@ -395,16 +353,17 @@ static void handle_tx(struct vhost_net *net)
                        break;
                }
                /* Skip header. TODO: support TSO. */
-               s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
                len = iov_length(vq->iov, out);
                iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
+               iov_iter_advance(&msg.msg_iter, hdr_size);
                /* Sanity check */
-               if (!len) {
+               if (!iov_iter_count(&msg.msg_iter)) {
                        vq_err(vq, "Unexpected header len for TX: "
                               "%zd expected %zd\n",
-                              iov_length(nvq->hdr, s), hdr_size);
+                              len, hdr_size);
                        break;
                }
+               len = iov_iter_count(&msg.msg_iter);
 
                zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
                                   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
@@ -469,7 +428,7 @@ static int peek_head_len(struct sock *sk)
        head = skb_peek(&sk->sk_receive_queue);
        if (likely(head)) {
                len = head->len;
-               if (vlan_tx_tag_present(head))
+               if (skb_vlan_tag_present(head))
                        len += VLAN_HLEN;
        }
 
@@ -538,7 +497,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
                ++headcount;
                seg += in;
        }
-       heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
+       heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
        *iovcount = seg;
        if (unlikely(log))
                *log_num = nlogs;
@@ -579,6 +538,7 @@ static void handle_rx(struct vhost_net *net)
        size_t vhost_hlen, sock_hlen;
        size_t vhost_len, sock_len;
        struct socket *sock;
+       struct iov_iter fixup;
 
        mutex_lock(&vq->mutex);
        sock = vq->private_data;
@@ -623,14 +583,19 @@ static void handle_rx(struct vhost_net *net)
                        break;
                }
                /* We don't need to be notified again. */
-               if (unlikely((vhost_hlen)))
-                       /* Skip header. TODO: support TSO. */
-                       move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
-               else
-                       /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
-                        * needed because recvmsg can modify msg_iov. */
-                       copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
-               iov_iter_init(&msg.msg_iter, READ, vq->iov, in, sock_len);
+               iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
+               fixup = msg.msg_iter;
+               if (unlikely((vhost_hlen))) {
+                       /* We will supply the header ourselves
+                        * TODO: support TSO.
+                        */
+                       iov_iter_advance(&msg.msg_iter, vhost_hlen);
+               } else {
+                       /* It'll come from socket; we'll need to patch
+                        * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+                        */
+                       iov_iter_advance(&fixup, sizeof(hdr));
+               }
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
                /* Userspace might have consumed the packet meanwhile:
@@ -642,18 +607,18 @@ static void handle_rx(struct vhost_net *net)
                        vhost_discard_vq_desc(vq, headcount);
                        continue;
                }
+               /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
                if (unlikely(vhost_hlen) &&
-                   memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
-                                     vhost_hlen)) {
+                   copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
                        vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
                               vq->iov->iov_base);
                        break;
                }
                /* TODO: Should check and handle checksum. */
+
+               hdr.num_buffers = cpu_to_vhost16(vq, headcount);
                if (likely(mergeable) &&
-                   memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
-                                     offsetof(typeof(hdr), num_buffers),
-                                     sizeof hdr.num_buffers)) {
+                   copy_to_iter(&hdr.num_buffers, 2, &fixup) != 2) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
index 01c01cb3933fdfc5bce7e9c457d128d2e8f460e4..dc78d87e0fc2c5e14832adf674899823ac223c5f 100644 (file)
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
        return 0;
 }
 
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+       switch (attr) {
+       case VIRTIO_SCSI_S_SIMPLE:
+               return TCM_SIMPLE_TAG;
+       case VIRTIO_SCSI_S_ORDERED:
+               return TCM_ORDERED_TAG;
+       case VIRTIO_SCSI_S_HEAD:
+               return TCM_HEAD_TAG;
+       case VIRTIO_SCSI_S_ACA:
+               return TCM_ACA_TAG;
+       default:
+               break;
+       }
+       return TCM_SIMPLE_TAG;
+}
+
 static void tcm_vhost_submission_work(struct work_struct *work)
 {
        struct tcm_vhost_cmd *cmd =
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
-                       cmd->tvc_task_attr, cmd->tvc_data_direction,
-                       TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
-                       NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
+                       vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
+                       cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
+                       sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+                       cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -1061,7 +1079,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                               req_size, vq->iov[0].iov_len);
                        break;
                }
-               ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
+               ret = copy_from_user(req, vq->iov[0].iov_base, req_size);
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        break;
index ed71b5347a766ee26c95039638a028ece31b53de..2ee28266fd0704fd1e1c4c64a6f19c8d863727fd 100644 (file)
@@ -713,9 +713,13 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                        r = -EFAULT;
                        break;
                }
-               if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
-                   (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
-                   (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+
+               /* Make sure it's safe to cast pointers to vring types. */
+               BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
+               BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
+               if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
+                   (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
+                   (a.log_guest_addr & (sizeof(u64) - 1))) {
                        r = -EINVAL;
                        break;
                }
@@ -1121,6 +1125,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
        struct vring_desc desc;
        unsigned int i = 0, count, found = 0;
        u32 len = vhost32_to_cpu(vq, indirect->len);
+       struct iov_iter from;
        int ret;
 
        /* Sanity check */
@@ -1138,6 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
                vq_err(vq, "Translation failure %d in indirect.\n", ret);
                return ret;
        }
+       iov_iter_init(&from, READ, vq->indirect, ret, len);
 
        /* We will use the result as an address to read from, so most
         * architectures only need a compiler barrier here. */
@@ -1160,8 +1166,8 @@ static int get_indirect(struct vhost_virtqueue *vq,
                               i, count);
                        return -EINVAL;
                }
-               if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
-                                             vq->indirect, sizeof desc))) {
+               if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
+                            sizeof(desc))) {
                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
                               i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
                        return -EINVAL;
index 1c29bd19e3d5fe9954153a8b821058a1be5ff6d5..0e5fde1d3ffbe5a152035f33063afa98bf84f33e 100644 (file)
@@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
                err = broadsheet_spiflash_read_range(par, start_sector_addr,
                                                data_start_addr, sector_buffer);
                if (err)
-                       return err;
+                       goto out;
        }
 
        /* now we copy our data into the right place in the sector buffer */
@@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
                err = broadsheet_spiflash_read_range(par, tail_start_addr,
                        tail_len, sector_buffer + tail_start_addr);
                if (err)
-                       return err;
+                       goto out;
        }
 
        /* if we got here we have the full sector that we want to rewrite. */
@@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
        /* first erase the sector */
        err = broadsheet_spiflash_erase_sector(par, start_sector_addr);
        if (err)
-               return err;
+               goto out;
 
        /* now write it */
        err = broadsheet_spiflash_write_sector(par, start_sector_addr,
                                        sector_buffer, sector_size);
+out:
+       kfree(sector_buffer);
        return err;
 }
 
index 900aa4ecd617990c8ce0caf1c46fb2993c8233b5..d6cab1fd9a4795da2fe2348c2882bc3e04a6cf30 100644 (file)
@@ -83,9 +83,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
        cancel_delayed_work_sync(&info->deferred_work);
 
        /* Run it immediately */
-       err = schedule_delayed_work(&info->deferred_work, 0);
+       schedule_delayed_work(&info->deferred_work, 0);
        mutex_unlock(&inode->i_mutex);
-       return err;
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
 
index 87accdb59c81b5ed5686ad2c291c636fe541289a..ac83ef5cfd7d7f6a96848d546e3c7155dfdcd148 100644 (file)
@@ -132,7 +132,6 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
        .mX_max = 127,
        .fint_min = 500000,
        .fint_max = 2500000,
-       .clkdco_max = 1800000000,
 
        .clkdco_min = 500000000,
        .clkdco_low = 1000000000,
@@ -156,7 +155,6 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
        .mX_max = 127,
        .fint_min = 620000,
        .fint_max = 2500000,
-       .clkdco_max = 1800000000,
 
        .clkdco_min = 750000000,
        .clkdco_low = 1500000000,
index 50bc62c5d367f5586bf5431a16e2b4ec91599ca6..335ffac224b97a57b6abc043b74ae47601e27535 100644 (file)
@@ -97,7 +97,8 @@ int dss_pll_enable(struct dss_pll *pll)
        return 0;
 
 err_enable:
-       regulator_disable(pll->regulator);
+       if (pll->regulator)
+               regulator_disable(pll->regulator);
 err_reg:
        clk_disable_unprepare(pll->clkin);
        return r;
index d51a983075bc57a579664367214637f3aeb3e647..5c2ccab5a958f6d41a68cbab3a48e78e8514cbda 100644 (file)
@@ -342,6 +342,8 @@ static void sdi_init_output(struct platform_device *pdev)
        out->output_type = OMAP_DISPLAY_TYPE_SDI;
        out->name = "sdi.0";
        out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
+       /* We have SDI only on OMAP3, where it's on port 1 */
+       out->port_num = 1;
        out->ops.sdi = &sdi_ops;
        out->owner = THIS_MODULE;
 
index 92cac803dee3c2261655a34a45a6a0c2d170636f..1085c0432158c02038aba6f961ea7e300f7f950b 100644 (file)
@@ -402,7 +402,7 @@ static int __init simplefb_init(void)
        if (ret)
                return ret;
 
-       if (IS_ENABLED(CONFIG_OF) && of_chosen) {
+       if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
                for_each_child_of_node(of_chosen, np) {
                        if (of_device_is_compatible(np, "simple-framebuffer"))
                                of_platform_device_create(np, NULL, NULL);
index 940cd196eef53ab6cc02bf44d2320f610e36685d..10fbfd8ab963f9e78905a5d932b13ae810639363 100644 (file)
@@ -21,6 +21,21 @@ static bool nologo;
 module_param(nologo, bool, 0);
 MODULE_PARM_DESC(nologo, "Disables startup logo");
 
+/*
+ * Logos are located in the initdata, and will be freed in kernel_init.
+ * Use late_init to mark the logos as freed to prevent any further use.
+ */
+
+static bool logos_freed;
+
+static int __init fb_logo_late_init(void)
+{
+       logos_freed = true;
+       return 0;
+}
+
+late_initcall(fb_logo_late_init);
+
 /* logo's are marked __initdata. Use __init_refok to tell
  * modpost that it is intended that this function uses data
  * marked __initdata.
@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
 {
        const struct linux_logo *logo = NULL;
 
-       if (nologo)
+       if (nologo || logos_freed)
                return NULL;
 
        if (depth >= 1) {
index 2ef9529809d8bd198455a1af19151c22fe4ca715..9756f21b809e080d1d1975b0734cb82cdea6e9e3 100644 (file)
@@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev)
 
        vp_free_vectors(vdev);
        kfree(vp_dev->vqs);
+       vp_dev->vqs = NULL;
 }
 
 static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
@@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
        return 0;
 }
 
-void virtio_pci_release_dev(struct device *_d)
-{
-       /*
-        * No need for a release method as we allocate/free
-        * all devices together with the pci devices.
-        * Provide an empty one to avoid getting a warning from core.
-        */
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int virtio_pci_freeze(struct device *dev)
 {
index adddb647b21d826c76ce54dac8707828c0ba381c..5a497289b7e9c336d1478db41ca5c0f60fbbafb9 100644 (file)
@@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev);
  * - ignore the affinity request if we're using INTX
  */
 int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
-void virtio_pci_release_dev(struct device *);
 
 int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id);
index 6c76f0f5658ccfcef86b865b9f9e2fa76334245c..a5486e65e04bd55d5c64a33d3dbeeadb27dd4857 100644 (file)
@@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
        .set_vq_affinity = vp_set_vq_affinity,
 };
 
+static void virtio_pci_release_dev(struct device *_d)
+{
+       struct virtio_device *vdev = dev_to_virtio(_d);
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       /* As struct device is a kobject, it's not safe to
+        * free the memory (including the reference counter itself)
+        * until it's release callback. */
+       kfree(vp_dev);
+}
+
 /* the PCI probing function */
 int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
@@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
        pci_iounmap(pci_dev, vp_dev->ioaddr);
        pci_release_regions(pci_dev);
        pci_disable_device(pci_dev);
-       kfree(vp_dev);
 }
index 5927c0a98a74b29cddac4d7894a71580590b7ff2..bcfd2a22208f34b621faf6922625e9e9fec83832 100644 (file)
@@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = {
        .shutdown       = cdns_wdt_shutdown,
        .driver         = {
                .name   = "cdns-wdt",
-               .owner  = THIS_MODULE,
                .of_match_table = cdns_wdt_of_match,
                .pm     = &cdns_wdt_pm_ops,
        },
index d6add516a7a7635662e5c1cb12a10206b93c5544..5142bbabe0279f0b36c92c854f7daa407e2e32a5 100644 (file)
@@ -52,6 +52,8 @@
 #define IMX2_WDT_WRSR          0x04            /* Reset Status Register */
 #define IMX2_WDT_WRSR_TOUT     (1 << 1)        /* -> Reset due to Timeout */
 
+#define IMX2_WDT_WMCR          0x08            /* Misc Register */
+
 #define IMX2_WDT_MAX_TIME      128
 #define IMX2_WDT_DEFAULT_TIME  60              /* in seconds */
 
@@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
 
        imx2_wdt_ping_if_active(wdog);
 
+       /*
+        * Disable the watchdog power down counter at boot. Otherwise the power
+        * down counter will pull down the #WDOG interrupt line for one clock
+        * cycle.
+        */
+       regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0);
+
        ret = watchdog_register_device(wdog);
        if (ret) {
                dev_err(&pdev->dev, "cannot register watchdog device\n");
@@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM_SLEEP
-/* Disable watchdog if it is active during suspend */
+/* Disable watchdog if it is active or non-active but still running */
 static int imx2_wdt_suspend(struct device *dev)
 {
        struct watchdog_device *wdog = dev_get_drvdata(dev);
        struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
 
-       imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
-       imx2_wdt_ping(wdog);
+       /* The watchdog IP block is running */
+       if (imx2_wdt_is_running(wdev)) {
+               imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+               imx2_wdt_ping(wdog);
 
-       /* Watchdog has been stopped but IP block is still running */
-       if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev))
-               del_timer_sync(&wdev->timer);
+               /* The watchdog is not active */
+               if (!watchdog_active(wdog))
+                       del_timer_sync(&wdev->timer);
+       }
 
        clk_disable_unprepare(wdev->clk);
 
@@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev)
        clk_prepare_enable(wdev->clk);
 
        if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
-               /* Resumes from deep sleep we need restart
-                * the watchdog again.
+               /*
+                * If the watchdog is still active and resumes
+                * from deep sleep state, need to restart the
+                * watchdog again.
                 */
                imx2_wdt_setup(wdog);
                imx2_wdt_set_timeout(wdog, wdog->timeout);
                imx2_wdt_ping(wdog);
        } else if (imx2_wdt_is_running(wdev)) {
+               /* Resuming from non-deep sleep state. */
+               imx2_wdt_set_timeout(wdog, wdog->timeout);
                imx2_wdt_ping(wdog);
-               mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
+               /*
+                * But the watchdog is not active, then start
+                * the timer again.
+                */
+               if (!watchdog_active(wdog))
+                       mod_timer(&wdev->timer,
+                                 jiffies + wdog->timeout * HZ / 2);
        }
 
        return 0;
index ef6a298e8c45833843097d51499333ff9a4ecfef..1f4155ee3404de97e4eee3988dc7823c9874ecbd 100644 (file)
@@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = {
        .remove         = meson_wdt_remove,
        .shutdown       = meson_wdt_shutdown,
        .driver         = {
-               .owner          = THIS_MODULE,
                .name           = DRV_NAME,
                .of_match_table = meson_wdt_dt_ids,
        },
index 06e14bfb3496c9d9aab923243c104addfbff7196..dbc732e9a5c01eb18ab91af910a997881dfe5fd8 100644 (file)
@@ -306,8 +306,8 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
 
                        _debug("- range %u-%u%s",
                               offset, to, msg->msg_flags ? " [more]" : "");
-                       iov_iter_init(&msg->msg_iter, WRITE,
-                                     (struct iovec *) iov, 1, to - offset);
+                       iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
+                                     iov, 1, to - offset);
 
                        /* have to change the state *before* sending the last
                         * packet as RxRPC might give us the reply before it
@@ -384,7 +384,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)iov, 1,
+       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
                      call->request_size);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
@@ -770,7 +770,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
 void afs_send_empty_reply(struct afs_call *call)
 {
        struct msghdr msg;
-       struct iovec iov[1];
+       struct kvec iov[1];
 
        _enter("");
 
@@ -778,7 +778,7 @@ void afs_send_empty_reply(struct afs_call *call)
        iov[0].iov_len          = 0;
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_init(&msg.msg_iter, WRITE, iov, 0, 0); /* WTF? */
+       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 0, 0);     /* WTF? */
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = 0;
@@ -805,7 +805,7 @@ void afs_send_empty_reply(struct afs_call *call)
 void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 {
        struct msghdr msg;
-       struct iovec iov[1];
+       struct kvec iov[1];
        int n;
 
        _enter("");
@@ -814,7 +814,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        iov[0].iov_len          = len;
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_init(&msg.msg_iter, WRITE, iov, 1, len);
+       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = 0;
index 2d3e32ebfd15510b8e97519a006486c83755121b..8729cf68d2fef5e41540283d74beba55285f59c5 100644 (file)
@@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
 {
        int ret;
        int type;
-       struct btrfs_tree_block_info *info;
        struct btrfs_extent_inline_ref *eiref;
 
        if (*ptr == (unsigned long)-1)
@@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
        }
 
        /* we can treat both ref types equally here */
-       info = (struct btrfs_tree_block_info *)(ei + 1);
        *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
-       *out_level = btrfs_tree_block_level(eb, info);
+
+       if (key->type == BTRFS_EXTENT_ITEM_KEY) {
+               struct btrfs_tree_block_info *info;
+
+               info = (struct btrfs_tree_block_info *)(ei + 1);
+               *out_level = btrfs_tree_block_level(eb, info);
+       } else {
+               ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
+               *out_level = (u8)key->offset;
+       }
 
        if (ret == 1)
                *ptr = (unsigned long)-1;
index 7e607416755a880fef1a06d3a8a3482417c0b364..0b180708bf79d87a36c9dcc78bbd6d72772101df 100644 (file)
@@ -1171,6 +1171,7 @@ struct btrfs_space_info {
        struct percpu_counter total_bytes_pinned;
 
        struct list_head list;
+       /* Protected by the spinlock 'lock'. */
        struct list_head ro_bgs;
 
        struct rw_semaphore groups_sem;
index 054577bddaf27869d9a524a73d4df5a76072e4e1..de4e70fb3cbbd4a5c28d13f1fe3aec16733ed49f 100644 (file)
@@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
 {
        struct btrfs_delayed_node *delayed_node;
 
+       /*
+        * we don't do delayed inode updates during log recovery because it
+        * leads to enospc problems.  This means we also can't do
+        * delayed inode refs
+        */
+       if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+               return -EAGAIN;
+
        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);
index a80b97100d90b3162d7d3688ed6b3c459bb56bc8..a684086c3c8123702cc41caa4d4dfe085aa7db3b 100644 (file)
@@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
        struct extent_buffer *leaf;
 
        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
-       if (ret < 0)
+       if (ret) {
+               if (ret > 0)
+                       ret = -ENOENT;
                goto fail;
-       BUG_ON(ret); /* Corruption */
+       }
 
        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);
 fail:
-       if (ret) {
+       if (ret)
                btrfs_abort_transaction(trans, root, ret);
-               return ret;
-       }
-       return 0;
+       return ret;
 
 }
 
@@ -9422,7 +9422,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
-       list_del_init(&block_group->ro_list);
        if (list_empty(&block_group->space_info->block_groups[index])) {
                kobj = block_group->space_info->block_group_kobjs[index];
                block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9464,6 +9463,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        btrfs_remove_free_space_cache(block_group);
 
        spin_lock(&block_group->space_info->lock);
+       list_del_init(&block_group->ro_list);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        block_group->space_info->disk_total -= block_group->key.offset * factor;
index 4ebabd2371533788c496070122def464004328ac..790dbae3343c4f965eaa58e317c08d702ad0a7ff 100644 (file)
@@ -2190,7 +2190,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
 
                next = next_state(state);
 
-               failrec = (struct io_failure_record *)state->private;
+               failrec = (struct io_failure_record *)(unsigned long)state->private;
                free_extent_state(state);
                kfree(failrec);
 
index e687bb0dc73a36724a921d40bc66c89473f7edac..8bf326affb944026a43bbc42a900d6f8355ce837 100644 (file)
@@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
 out_fail:
        btrfs_end_transaction(trans, root);
-       if (drop_on_err)
+       if (drop_on_err) {
+               inode_dec_link_count(inode);
                iput(inode);
+       }
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return err;
index f2bb13a23f860ea19d0403057395d38d8b9d2632..e427cb7ee12c7d848cd16402d78854e22db969dd 100644 (file)
@@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
                ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
                                             flags, gen, mirror_num,
                                             have_csum ? csum : NULL);
-skip:
                if (ret)
                        return ret;
+skip:
                len -= l;
                logical += l;
                physical += l;
@@ -3053,7 +3053,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 
        ppath = btrfs_alloc_path();
        if (!ppath) {
-               btrfs_free_path(ppath);
+               btrfs_free_path(path);
                return -ENOMEM;
        }
 
@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        path->search_commit_root = 1;
        path->skip_locking = 1;
 
+       ppath->search_commit_root = 1;
+       ppath->skip_locking = 1;
        /*
         * trigger the readahead for extent tree csum tree and wait for
         * completion. During readahead, the scrub is officially paused
index 60f7cbe815e9c88362a1680f8e4259f51ca6f019..6f49b2872a6454330bac0ef912be3d0152e2ef4f 100644 (file)
@@ -1000,10 +1000,20 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
                         */
                        if (fs_info->pending_changes == 0)
                                return 0;
+                       /*
+                        * A non-blocking test if the fs is frozen. We must not
+                        * start a new transaction here otherwise a deadlock
+                        * happens. The pending operations are delayed to the
+                        * next commit after thawing.
+                        */
+                       if (__sb_start_write(sb, SB_FREEZE_WRITE, false))
+                               __sb_end_write(sb, SB_FREEZE_WRITE);
+                       else
+                               return 0;
                        trans = btrfs_start_transaction(root, 0);
-               } else {
-                       return PTR_ERR(trans);
                }
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
        }
        return btrfs_commit_transaction(trans, root);
 }
index a605d4e2f2bca98e14c430c31514588c1b7982f1..e88b59d13439690f15810359ee7be343ad86b7a9 100644 (file)
@@ -2118,7 +2118,7 @@ void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
        unsigned long prev;
        unsigned long bit;
 
-       prev = cmpxchg(&fs_info->pending_changes, 0, 0);
+       prev = xchg(&fs_info->pending_changes, 0);
        if (!prev)
                return;
 
index f5013d92a7e6b73d9c4369683f6aac987afbd287..c81c0e004588b9e2ae03580ed6265096d6608d46 100644 (file)
@@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                }
        }
 
-       dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n",
+       dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
             inode, ceph_vinop(inode), len, locked_page);
 
        if (len > 0) {
index 9c56ef776407ad28e30760e2a0f0c1caede8fea0..7febcf2475c5ab675c04dfd2fddaa3ed574522a0 100644 (file)
@@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
                *flags = CIFSSEC_MUST_NTLMV2;
        else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
                *flags = CIFSSEC_MUST_NTLM;
-       else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
+       else if (CIFSSEC_MUST_LANMAN &&
+                (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
                *flags = CIFSSEC_MUST_LANMAN;
-       else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
+       else if (CIFSSEC_MUST_PLNTXT &&
+                (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
                *flags = CIFSSEC_MUST_PLNTXT;
 
        *flags |= signflags;
index 96b7e9b7706dc58b767863fdeadf3cadfb4e5324..74f12877493ac6c3f87792192b8aa8c1190f3c05 100644 (file)
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;
+       bool oplock_break_cancelled;
 
        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
        }
        spin_unlock(&cifs_file_list_lock);
 
-       cancel_work_sync(&cifs_file->oplock_break);
+       oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
 
        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
                _free_xid(xid);
        }
 
+       if (oplock_break_cancelled)
+               cifs_done_oplock_break(cifsi);
+
        cifs_del_pending_open(&open);
 
        /*
index 45cb59bcc79188df595447ab93b56f9cf1a6f080..8b7898b7670f88c3ea9ec596129ef569eed183bd 100644 (file)
@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
        }
 
        src_inode = file_inode(src_file.file);
+       rc = -EINVAL;
+       if (S_ISDIR(src_inode->i_mode))
+               goto out_fput;
 
        /*
         * Note: cifs case is easier than btrfs since server responsible for
         * checks for proper open modes and file type and if it wants
         * server could even support copy of range where source = target
         */
-
-       /* so we do not deadlock racing two ioctls on same files */
-       if (target_inode < src_inode) {
-               mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
-               mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
-       } else {
-               mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
-               mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
-       }
+       lock_two_nondirectories(target_inode, src_inode);
 
        /* determine range to clone */
        rc = -EINVAL;
@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
 out_unlock:
        /* although unlocking in the reverse order from locking is not
           strictly necessary here it is a little cleaner to be consistent */
-       if (target_inode < src_inode) {
-               mutex_unlock(&src_inode->i_mutex);
-               mutex_unlock(&target_inode->i_mutex);
-       } else {
-               mutex_unlock(&target_inode->i_mutex);
-               mutex_unlock(&src_inode->i_mutex);
-       }
+       unlock_two_nondirectories(src_inode, target_inode);
 out_fput:
        fdput(src_file);
 out_drop_write:
index 6c1566366a6613cbc494fe46344c8fd00342a82b..a4232ec4f2ba45386b4f25db484f7f30135b01c2 100644 (file)
@@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
        }
 
        rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
-       memset(wpwd, 0, 129 * sizeof(__le16));
+       memzero_explicit(wpwd, sizeof(wpwd));
 
        return rc;
 }
index e7cfbaf8d0e2ed66b404c259dcd64c2d4ccd5f54..1e6e227134d7b5dae4fa3fc43feafeb994bfa708 100644 (file)
@@ -56,13 +56,8 @@ static int send_data(struct sk_buff *skb)
 {
        struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
        void *data = genlmsg_data(genlhdr);
-       int rv;
 
-       rv = genlmsg_end(skb, data);
-       if (rv < 0) {
-               nlmsg_free(skb);
-               return rv;
-       }
+       genlmsg_end(skb, data);
 
        return genlmsg_unicast(&init_net, skb, listener_nlportid);
 }
index e5d3eadf47b1e7fb6251c590016044cf8b5c4c98..bed43081720f718fc30dca204be8509ddcf5eac5 100644 (file)
@@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
        /* fallback to generic here if not in extents fmt */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-               return __generic_block_fiemap(inode, fieinfo, start, len,
-                                             ext4_get_block);
+               return generic_block_fiemap(inode, fieinfo, start, len,
+                       ext4_get_block);
 
        if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
                return -EBADR;
index 513c12cf444c239f5c34bd4d73c653029bdaca96..8131be8c0af3166aac865557baa9f0371564a397 100644 (file)
@@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
  * we determine this extent as a data or a hole according to whether the
  * page cache has data or not.
  */
-static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
-                                    loff_t endoff, loff_t *offset)
+static int ext4_find_unwritten_pgoff(struct inode *inode,
+                                    int whence,
+                                    struct ext4_map_blocks *map,
+                                    loff_t *offset)
 {
        struct pagevec pvec;
+       unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
+       loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;
 
+       blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
-
+       endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
 
        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;
@@ -403,144 +408,147 @@ out:
 static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
 {
        struct inode *inode = file->f_mapping->host;
-       struct fiemap_extent_info fie;
-       struct fiemap_extent ext[2];
-       loff_t next;
-       int i, ret = 0;
+       struct ext4_map_blocks map;
+       struct extent_status es;
+       ext4_lblk_t start, last, end;
+       loff_t dataoff, isize;
+       int blkbits;
+       int ret = 0;
 
        mutex_lock(&inode->i_mutex);
-       if (offset >= inode->i_size) {
+
+       isize = i_size_read(inode);
+       if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }
-       fie.fi_flags = 0;
-       fie.fi_extents_max = 2;
-       fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
-       while (1) {
-               mm_segment_t old_fs = get_fs();
-
-               fie.fi_extents_mapped = 0;
-               memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
-
-               set_fs(get_ds());
-               ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
-               set_fs(old_fs);
-               if (ret)
+
+       blkbits = inode->i_sb->s_blocksize_bits;
+       start = offset >> blkbits;
+       last = start;
+       end = isize >> blkbits;
+       dataoff = offset;
+
+       do {
+               map.m_lblk = last;
+               map.m_len = end - last + 1;
+               ret = ext4_map_blocks(NULL, inode, &map, 0);
+               if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+                       if (last != start)
+                               dataoff = (loff_t)last << blkbits;
                        break;
+               }
 
-               /* No extents found, EOF */
-               if (!fie.fi_extents_mapped) {
-                       ret = -ENXIO;
+               /*
+                * If there is a delay extent at this offset,
+                * it will be as a data.
+                */
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
+               if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+                       if (last != start)
+                               dataoff = (loff_t)last << blkbits;
                        break;
                }
-               for (i = 0; i < fie.fi_extents_mapped; i++) {
-                       next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
 
-                       if (offset < (loff_t)ext[i].fe_logical)
-                               offset = (loff_t)ext[i].fe_logical;
-                       /*
-                        * If extent is not unwritten, then it contains valid
-                        * data, mapped or delayed.
-                        */
-                       if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
-                               goto out;
+               /*
+                * If there is a unwritten extent at this offset,
+                * it will be as a data or a hole according to page
+                * cache that has data or not.
+                */
+               if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+                       int unwritten;
+                       unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+                                                             &map, &dataoff);
+                       if (unwritten)
+                               break;
+               }
 
-                       /*
-                        * If there is a unwritten extent at this offset,
-                        * it will be as a data or a hole according to page
-                        * cache that has data or not.
-                        */
-                       if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
-                                                     next, &offset))
-                               goto out;
+               last++;
+               dataoff = (loff_t)last << blkbits;
+       } while (last <= end);
 
-                       if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
-                               ret = -ENXIO;
-                               goto out;
-                       }
-                       offset = next;
-               }
-       }
-       if (offset > inode->i_size)
-               offset = inode->i_size;
-out:
        mutex_unlock(&inode->i_mutex);
-       if (ret)
-               return ret;
 
-       return vfs_setpos(file, offset, maxsize);
+       if (dataoff > isize)
+               return -ENXIO;
+
+       return vfs_setpos(file, dataoff, maxsize);
 }
 
 /*
- * ext4_seek_hole() retrieves the offset for SEEK_HOLE
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
  */
 static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
 {
        struct inode *inode = file->f_mapping->host;
-       struct fiemap_extent_info fie;
-       struct fiemap_extent ext[2];
-       loff_t next;
-       int i, ret = 0;
+       struct ext4_map_blocks map;
+       struct extent_status es;
+       ext4_lblk_t start, last, end;
+       loff_t holeoff, isize;
+       int blkbits;
+       int ret = 0;
 
        mutex_lock(&inode->i_mutex);
-       if (offset >= inode->i_size) {
+
+       isize = i_size_read(inode);
+       if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }
 
-       fie.fi_flags = 0;
-       fie.fi_extents_max = 2;
-       fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
-       while (1) {
-               mm_segment_t old_fs = get_fs();
-
-               fie.fi_extents_mapped = 0;
-               memset(ext, 0, sizeof(*ext));
+       blkbits = inode->i_sb->s_blocksize_bits;
+       start = offset >> blkbits;
+       last = start;
+       end = isize >> blkbits;
+       holeoff = offset;
 
-               set_fs(get_ds());
-               ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
-               set_fs(old_fs);
-               if (ret)
-                       break;
+       do {
+               map.m_lblk = last;
+               map.m_len = end - last + 1;
+               ret = ext4_map_blocks(NULL, inode, &map, 0);
+               if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+                       last += ret;
+                       holeoff = (loff_t)last << blkbits;
+                       continue;
+               }
 
-               /* No extents found */
-               if (!fie.fi_extents_mapped)
-                       break;
+               /*
+                * If there is a delay extent at this offset,
+                * we will skip this extent.
+                */
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
+               if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+                       last = es.es_lblk + es.es_len;
+                       holeoff = (loff_t)last << blkbits;
+                       continue;
+               }
 
-               for (i = 0; i < fie.fi_extents_mapped; i++) {
-                       next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
-                       /*
-                        * If extent is not unwritten, then it contains valid
-                        * data, mapped or delayed.
-                        */
-                       if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
-                               if (offset < (loff_t)ext[i].fe_logical)
-                                       goto out;
-                               offset = next;
+               /*
+                * If there is a unwritten extent at this offset,
+                * it will be as a data or a hole according to page
+                * cache that has data or not.
+                */
+               if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+                       int unwritten;
+                       unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+                                                             &map, &holeoff);
+                       if (!unwritten) {
+                               last += ret;
+                               holeoff = (loff_t)last << blkbits;
                                continue;
                        }
-                       /*
-                        * If there is a unwritten extent at this offset,
-                        * it will be as a data or a hole according to page
-                        * cache that has data or not.
-                        */
-                       if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
-                                                     next, &offset))
-                               goto out;
-
-                       offset = next;
-                       if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
-                               goto out;
                }
-       }
-       if (offset > inode->i_size)
-               offset = inode->i_size;
-out:
+
+               /* find a hole */
+               break;
+       } while (last <= end);
+
        mutex_unlock(&inode->i_mutex);
-       if (ret)
-               return ret;
 
-       return vfs_setpos(file, offset, maxsize);
+       if (holeoff > isize)
+               holeoff = isize;
+
+       return vfs_setpos(file, holeoff, maxsize);
 }
 
 /*
index bf76f405a5f91df5f276e2a2d1dcf2935178d4a7..8a8ec6293b195f16623e716342463979427b3156 100644 (file)
@@ -23,6 +23,18 @@ int ext4_resize_begin(struct super_block *sb)
        if (!capable(CAP_SYS_RESOURCE))
                return -EPERM;
 
+       /*
+        * If we are not using the primary superblock/GDT copy don't resize,
+         * because the user tools have no way of handling this.  Probably a
+         * bad time to do it anyways.
+         */
+       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+           le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+               ext4_warning(sb, "won't resize using backup superblock at %llu",
+                       (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+               return -EPERM;
+       }
+
        /*
         * We are not allowed to do online-resizing on a filesystem mounted
         * with error, because it can destroy the filesystem easily.
@@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
                       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
                       gdb_num);
 
-       /*
-        * If we are not using the primary superblock/GDT copy don't resize,
-         * because the user tools have no way of handling this.  Probably a
-         * bad time to do it anyways.
-         */
-       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
-           le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
-               ext4_warning(sb, "won't resize using backup superblock at %llu",
-                       (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
-               return -EPERM;
-       }
-
        gdb_bh = sb_bread(sb, gdblock);
        if (!gdb_bh)
                return -EIO;
index 43c92b1685cbff914240436f4d0901861c9fd2c2..74c5f53595fbd1d236026f0d78b3071982f89075 100644 (file)
@@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
            EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
-               ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+               ext4_warning(sb, "metadata_csum and uninit_bg are "
                             "redundant flags; please run fsck.");
 
        /* Check for a known checksum algorithm */
index 99d440a4a6ba259e5bd7ec6b167dbedb2637ac5d..ee85cd4e136abbff33409fb018343028d21578e2 100644 (file)
@@ -740,14 +740,15 @@ static int __init fcntl_init(void)
         * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
         * is defined as O_NONBLOCK on some platforms and not on others.
         */
-       BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
                O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
-               __FMODE_EXEC    | O_PATH        | __O_TMPFILE
+               __FMODE_EXEC    | O_PATH        | __O_TMPFILE   |
+               __FMODE_NONOTIFY
                ));
 
        fasync_cache = kmem_cache_create("fasync_cache",
index ba1107977f2ecafa96cafc04f6498b3fb79a3145..ed19a7d622fa35decaa08b10e83b8bdee8712419 100644 (file)
@@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req)
        req->in.h.pid = current->pid;
 }
 
+void fuse_set_initialized(struct fuse_conn *fc)
+{
+       /* Make sure stores before this are seen on another CPU */
+       smp_wmb();
+       fc->initialized = 1;
+}
+
 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
 {
        return !fc->initialized || (for_background && fc->blocked);
@@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                if (intr)
                        goto out;
        }
+       /* Matches smp_wmb() in fuse_set_initialized() */
+       smp_rmb();
 
        err = -ENOTCONN;
        if (!fc->connected)
@@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 
        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, fc->initialized);
+       /* Matches smp_wmb() in fuse_set_initialized() */
+       smp_rmb();
        req = fuse_request_alloc(0);
        if (!req)
                req = get_reserved_req(fc, file);
@@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 }
 EXPORT_SYMBOL_GPL(fuse_request_send);
 
+static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
+{
+       if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
+               args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+
+       if (fc->minor < 9) {
+               switch (args->in.h.opcode) {
+               case FUSE_LOOKUP:
+               case FUSE_CREATE:
+               case FUSE_MKNOD:
+               case FUSE_MKDIR:
+               case FUSE_SYMLINK:
+               case FUSE_LINK:
+                       args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+                       break;
+               case FUSE_GETATTR:
+               case FUSE_SETATTR:
+                       args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+                       break;
+               }
+       }
+       if (fc->minor < 12) {
+               switch (args->in.h.opcode) {
+               case FUSE_CREATE:
+                       args->in.args[0].size = sizeof(struct fuse_open_in);
+                       break;
+               case FUSE_MKNOD:
+                       args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+                       break;
+               }
+       }
+}
+
 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
 {
        struct fuse_req *req;
@@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
+       /* Needs to be done after fuse_get_req() so that fc->minor is valid */
+       fuse_adjust_compat(fc, args);
+
        req->in.h.opcode = args->in.h.opcode;
        req->in.h.nodeid = args->in.h.nodeid;
        req->in.numargs = args->in.numargs;
@@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
        if (fc->connected) {
                fc->connected = 0;
                fc->blocked = 0;
-               fc->initialized = 1;
+               fuse_set_initialized(fc);
                end_io_requests(fc);
                end_queued_requests(fc);
                end_polls(fc);
@@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
                spin_lock(&fc->lock);
                fc->connected = 0;
                fc->blocked = 0;
-               fc->initialized = 1;
+               fuse_set_initialized(fc);
                end_queued_requests(fc);
                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
index 252b8a5de8b57f71b841d1fc64c9f48e78b641c2..08e7b1a9d5d0edaca8b94ef386d9200078958df3 100644 (file)
@@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
        args->in.args[0].size = name->len + 1;
        args->in.args[0].value = name->name;
        args->out.numargs = 1;
-       if (fc->minor < 9)
-               args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-       else
-               args->out.args[0].size = sizeof(struct fuse_entry_out);
+       args->out.args[0].size = sizeof(struct fuse_entry_out);
        args->out.args[0].value = outarg;
 }
 
@@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
        args.in.h.opcode = FUSE_CREATE;
        args.in.h.nodeid = get_node_id(dir);
        args.in.numargs = 2;
-       args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
-                                               sizeof(inarg);
+       args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.in.args[1].size = entry->d_name.len + 1;
        args.in.args[1].value = entry->d_name.name;
        args.out.numargs = 2;
-       if (fc->minor < 9)
-               args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-       else
-               args.out.args[0].size = sizeof(outentry);
+       args.out.args[0].size = sizeof(outentry);
        args.out.args[0].value = &outentry;
        args.out.args[1].size = sizeof(outopen);
        args.out.args[1].value = &outopen;
@@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
        memset(&outarg, 0, sizeof(outarg));
        args->in.h.nodeid = get_node_id(dir);
        args->out.numargs = 1;
-       if (fc->minor < 9)
-               args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-       else
-               args->out.args[0].size = sizeof(outarg);
+       args->out.args[0].size = sizeof(outarg);
        args->out.args[0].value = &outarg;
        err = fuse_simple_request(fc, args);
        if (err)
@@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
        inarg.umask = current_umask();
        args.in.h.opcode = FUSE_MKNOD;
        args.in.numargs = 2;
-       args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
-                                               sizeof(inarg);
+       args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.in.args[1].size = entry->d_name.len + 1;
        args.in.args[1].value = entry->d_name.name;
@@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
        args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.out.numargs = 1;
-       if (fc->minor < 9)
-               args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
-       else
-               args.out.args[0].size = sizeof(outarg);
+       args.out.args[0].size = sizeof(outarg);
        args.out.args[0].value = &outarg;
        err = fuse_simple_request(fc, &args);
        if (!err) {
@@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
        args->in.args[0].size = sizeof(*inarg_p);
        args->in.args[0].value = inarg_p;
        args->out.numargs = 1;
-       if (fc->minor < 9)
-               args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
-       else
-               args->out.args[0].size = sizeof(*outarg_p);
+       args->out.args[0].size = sizeof(*outarg_p);
        args->out.args[0].value = outarg_p;
 }
 
index e0fc6725d1d0d66a4c3c7dce595239631ba353b1..1cdfb07c1376b4f4b5633e86fdbdfc4320953de2 100644 (file)
@@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
 int fuse_do_setattr(struct inode *inode, struct iattr *attr,
                    struct file *file);
 
+void fuse_set_initialized(struct fuse_conn *fc);
+
 #endif /* _FS_FUSE_I_H */
index 6749109f255da69a5c24825aab1f2a25140fbb47..f38256e4476ed2a9101480342bcd0fd90a99fd38 100644 (file)
@@ -424,8 +424,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
        args.in.h.opcode = FUSE_STATFS;
        args.in.h.nodeid = get_node_id(dentry->d_inode);
        args.out.numargs = 1;
-       args.out.args[0].size =
-               fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
+       args.out.args[0].size = sizeof(outarg);
        args.out.args[0].value = &outarg;
        err = fuse_simple_request(fc, &args);
        if (!err)
@@ -898,7 +897,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                fc->max_write = max_t(unsigned, 4096, fc->max_write);
                fc->conn_init = 1;
        }
-       fc->initialized = 1;
+       fuse_set_initialized(fc);
        wake_up_all(&fc->blocked_waitq);
 }
 
index c8b148bbdc8b574f77660eb35c8feba4baec2e5e..3e193cb36996d06520e2ed98e49a838fdfbab90d 100644 (file)
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 
 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd,
-                            struct fs_disk_quota *fdq)
+                            struct qc_dqblk *fdq)
 {
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
        be64_add_cpu(&q.qu_value, change);
        qd->qd_qb.qb_value = q.qu_value;
        if (fdq) {
-               if (fdq->d_fieldmask & FS_DQ_BSOFT) {
-                       q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+               if (fdq->d_fieldmask & QC_SPC_SOFT) {
+                       q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_warn = q.qu_warn;
                }
-               if (fdq->d_fieldmask & FS_DQ_BHARD) {
-                       q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+               if (fdq->d_fieldmask & QC_SPC_HARD) {
+                       q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_limit = q.qu_limit;
                }
-               if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
-                       q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+               if (fdq->d_fieldmask & QC_SPACE) {
+                       q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_value = q.qu_value;
                }
        }
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
 }
 
 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
-                         struct fs_disk_quota *fdq)
+                         struct qc_dqblk *fdq)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
        struct gfs2_holder q_gh;
        int error;
 
-       memset(fdq, 0, sizeof(struct fs_disk_quota));
+       memset(fdq, 0, sizeof(*fdq));
 
        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
                goto out;
 
        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
-       fdq->d_version = FS_DQUOT_VERSION;
-       fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
-       fdq->d_id = from_kqid_munged(current_user_ns(), qid);
-       fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
-       fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
-       fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+       fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+       fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+       fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
 
        gfs2_glock_dq_uninit(&q_gh);
 out:
@@ -1536,10 +1533,10 @@ out:
 }
 
 /* GFS2 only supports a subset of the XFS fields */
-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
 
 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
-                         struct fs_disk_quota *fdq)
+                         struct qc_dqblk *fdq)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
                goto out_i;
 
        /* If nothing has changed, this is a no-op */
-       if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
-           ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
-               fdq->d_fieldmask ^= FS_DQ_BSOFT;
+       if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+           ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+               fdq->d_fieldmask ^= QC_SPC_SOFT;
 
-       if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
-           ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
-               fdq->d_fieldmask ^= FS_DQ_BHARD;
+       if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+           ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+               fdq->d_fieldmask ^= QC_SPC_HARD;
 
-       if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
-           ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
-               fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+       if ((fdq->d_fieldmask & QC_SPACE) &&
+           ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+               fdq->d_fieldmask ^= QC_SPACE;
 
        if (fdq->d_fieldmask == 0)
                goto out_i;
index 37989f02a226ac40e104ee02efdad39d805686c5..2d881b381d2b787bbb2ff40b151e7496c0abafae 100644 (file)
@@ -201,10 +201,14 @@ static unsigned int kernfs_name_hash(const char *name, const void *ns)
 static int kernfs_name_compare(unsigned int hash, const char *name,
                               const void *ns, const struct kernfs_node *kn)
 {
-       if (hash != kn->hash)
-               return hash - kn->hash;
-       if (ns != kn->ns)
-               return ns - kn->ns;
+       if (hash < kn->hash)
+               return -1;
+       if (hash > kn->hash)
+               return 1;
+       if (ns < kn->ns)
+               return -1;
+       if (ns > kn->ns)
+               return 1;
        return strcmp(name, kn->name);
 }
 
index e94c887da2d72f7043ed010bfa8675b4ad825bb6..55505cbe11afa165ec90ec934301c15a1b9a4314 100644 (file)
@@ -138,10 +138,6 @@ lockd(void *vrqstp)
 
        dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
-       if (!nlm_timeout)
-               nlm_timeout = LOCKD_DFLT_TIMEO;
-       nlmsvc_timeout = nlm_timeout * HZ;
-
        /*
         * The main request loop. We don't terminate until the last
         * NFS mount or NFS daemon has gone away.
@@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void)
                printk(KERN_WARNING
                        "lockd_up: no pid, %d users??\n", nlmsvc_users);
 
+       if (!nlm_timeout)
+               nlm_timeout = LOCKD_DFLT_TIMEO;
+       nlmsvc_timeout = nlm_timeout * HZ;
+
        serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
        if (!serv) {
                printk(KERN_WARNING "lockd_up: create service failed\n");
index 735b8d3fa78c92bf746aff05475be1fa2a82abd6..59e2f905e4ffea324dbf44faf1b666974adc6c23 100644 (file)
@@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp)
                        break;
        }
        trace_generic_delete_lease(inode, fl);
-       if (fl)
+       if (fl && IS_LEASE(fl))
                error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
        spin_unlock(&inode->i_lock);
        locks_dispose_list(&dispose);
index 10bf07280f4ab2715845003334b73d80bde15f44..294692ff83b1a4e024ff217727fe958edabb0e3d 100644 (file)
@@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
  */
 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
+       struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+       /* we only support swap file calling nfs_direct_IO */
+       if (!IS_SWAPFILE(inode))
+               return 0;
+
 #ifndef CONFIG_NFS_SWAP
        dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp, (long long) pos, iter->nr_segs);
index 4bffe637ea3255fd2e1e6812556e3907791f4152..2211f6ba873628485fcabf874adf0c3c985a6d84 100644 (file)
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
 
        nfs_attr_check_mountpoint(sb, fattr);
 
-       if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
-           !nfs_attr_use_mounted_on_fileid(fattr))
+       if (nfs_attr_use_mounted_on_fileid(fattr))
+               fattr->fileid = fattr->mounted_on_fileid;
+       else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
                goto out_no_inode;
        if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
                goto out_no_inode;
index efaa31c70fbe1c43d265c51bc542a8270348c339..b6f34bfa6fe83b271a3b1f6c66e63f9e21587f6d 100644 (file)
@@ -31,8 +31,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
            (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
             ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
                return 0;
-
-       fattr->fileid = fattr->mounted_on_fileid;
        return 1;
 }
 
index 03311259b0c45c88de37122cffc0f28c8f6c7e63..706ad10b8186d4401eb4da48ebf0e7fba7686481 100644 (file)
@@ -228,6 +228,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
        kfree(clp->cl_serverowner);
        kfree(clp->cl_serverscope);
        kfree(clp->cl_implid);
+       kfree(clp->cl_owner_id);
 }
 
 void nfs4_free_client(struct nfs_client *clp)
@@ -452,6 +453,14 @@ static void nfs4_swap_callback_idents(struct nfs_client *keep,
        spin_unlock(&nn->nfs_client_lock);
 }
 
+static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
+               const struct nfs_client *clp2)
+{
+       if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL)
+               return true;
+       return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
+}
+
 /**
  * nfs40_walk_client_list - Find server that recognizes a client ID
  *
@@ -483,9 +492,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
                if (pos->rpc_ops != new->rpc_ops)
                        continue;
 
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
                if (pos->cl_minorversion != new->cl_minorversion)
                        continue;
 
@@ -510,6 +516,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
                if (pos->cl_clientid != new->cl_clientid)
                        continue;
 
+               if (!nfs4_match_client_owner_id(pos, new))
+                       continue;
+
                atomic_inc(&pos->cl_count);
                spin_unlock(&nn->nfs_client_lock);
 
@@ -566,20 +575,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
 }
 
 /*
- * Returns true if the server owners match
+ * Returns true if the server major ids match
  */
 static bool
-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
+nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
 {
        struct nfs41_server_owner *o1 = a->cl_serverowner;
        struct nfs41_server_owner *o2 = b->cl_serverowner;
 
-       if (o1->minor_id != o2->minor_id) {
-               dprintk("NFS: --> %s server owner minor IDs do not match\n",
-                       __func__);
-               return false;
-       }
-
        if (o1->major_id_sz != o2->major_id_sz)
                goto out_major_mismatch;
        if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
@@ -621,9 +624,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (pos->rpc_ops != new->rpc_ops)
                        continue;
 
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
                if (pos->cl_minorversion != new->cl_minorversion)
                        continue;
 
@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
                        prev = pos;
 
                        status = nfs_wait_client_init_complete(pos);
-                       if (status == 0) {
+                       if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
                                nfs4_schedule_lease_recovery(pos);
                                status = nfs4_wait_clnt_recover(pos);
                        }
@@ -654,7 +654,19 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (!nfs4_match_clientids(pos, new))
                        continue;
 
-               if (!nfs4_match_serverowners(pos, new))
+               /*
+                * Note that session trunking is just a special subcase of
+                * client id trunking. In either case, we want to fall back
+                * to using the existing nfs_client.
+                */
+               if (!nfs4_check_clientid_trunking(pos, new))
+                       continue;
+
+               /* Unlike NFSv4.0, we know that NFSv4.1 always uses the
+                * uniform string, however someone might switch the
+                * uniquifier string on us.
+                */
+               if (!nfs4_match_client_owner_id(pos, new))
                        continue;
 
                atomic_inc(&pos->cl_count);
index e7f8d5ff2581a98269a262998beb43ccaca23e3c..c347705b016104de8b0360f163e87c19d9b4d78c 100644 (file)
@@ -1117,8 +1117,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
                return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
-       if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
-               return 0;
        if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
                return 0;
        nfs_mark_delegation_referenced(delegation);
@@ -4917,11 +4915,14 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
 }
 
 static unsigned int
-nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
+nfs4_init_nonuniform_client_string(struct nfs_client *clp,
                                   char *buf, size_t len)
 {
        unsigned int result;
 
+       if (clp->cl_owner_id != NULL)
+               return strlcpy(buf, clp->cl_owner_id, len);
+
        rcu_read_lock();
        result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
                                clp->cl_ipaddr,
@@ -4930,24 +4931,32 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
                                rpc_peeraddr2str(clp->cl_rpcclient,
                                                        RPC_DISPLAY_PROTO));
        rcu_read_unlock();
+       clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
        return result;
 }
 
 static unsigned int
-nfs4_init_uniform_client_string(const struct nfs_client *clp,
+nfs4_init_uniform_client_string(struct nfs_client *clp,
                                char *buf, size_t len)
 {
        const char *nodename = clp->cl_rpcclient->cl_nodename;
+       unsigned int result;
+
+       if (clp->cl_owner_id != NULL)
+               return strlcpy(buf, clp->cl_owner_id, len);
 
        if (nfs4_client_id_uniquifier[0] != '\0')
-               return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
+               result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
                                clp->rpc_ops->version,
                                clp->cl_minorversion,
                                nfs4_client_id_uniquifier,
                                nodename);
-       return scnprintf(buf, len, "Linux NFSv%u.%u %s",
+       else
+               result = scnprintf(buf, len, "Linux NFSv%u.%u %s",
                                clp->rpc_ops->version, clp->cl_minorversion,
                                nodename);
+       clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
+       return result;
 }
 
 /*
index 3550a9c876161afe908274c937591c5f40d9f78a..c06a1ba80d73e5fd2cd6c68f61210e8be9518993 100644 (file)
@@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
                status = nfs4_setlease(dp);
                goto out;
        }
-       atomic_inc(&fp->fi_delegees);
        if (fp->fi_had_conflict) {
                status = -EAGAIN;
                goto out_unlock;
        }
+       atomic_inc(&fp->fi_delegees);
        hash_delegation_locked(dp, fp);
        status = 0;
 out_unlock:
index c991616acca9ef86e99d1e33dfd9d427adfb8b5f..bff8567aa42d1b04cd85e6f2863de3b1f4d9a2db 100644 (file)
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
        struct fsnotify_event *kevent;
        char __user *start;
        int ret;
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        start = buf;
        group = file->private_data;
 
        pr_debug("%s: group=%p\n", __func__, group);
 
+       add_wait_queue(&group->notification_waitq, &wait);
        while (1) {
-               prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
-
                mutex_lock(&group->notification_mutex);
                kevent = get_one_event(group, count);
                mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 
                        if (start != buf)
                                break;
-                       schedule();
+
+                       wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
                        continue;
                }
 
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
                buf += ret;
                count -= ret;
        }
+       remove_wait_queue(&group->notification_waitq, &wait);
 
-       finish_wait(&group->notification_waitq, &wait);
        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
index 79b5af5e6a7b5d8a593a251cfa8c27aa11769447..cecd875653e4cc12d4326e7bf3e192106c0a94c0 100644 (file)
@@ -2023,11 +2023,8 @@ leave:
        dlm_lockres_drop_inflight_ref(dlm, res);
        spin_unlock(&res->spinlock);
 
-       if (ret < 0) {
+       if (ret < 0)
                mlog_errno(ret);
-               if (newlock)
-                       dlm_lock_put(newlock);
-       }
 
        return ret;
 }
index b931e04e33889742a6192255b3bd95d8779203ea..914c121ec8900380482f83728b90f7b0bd14e418 100644 (file)
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     const char *symname);
 
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+                            struct buffer_head **bh1,
+                            struct inode *inode1,
+                            struct buffer_head **bh2,
+                            struct inode *inode2,
+                            int rename);
+
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
 /* An orphan dir name is an 8 byte value, printed as a hex string */
 #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
 
@@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry,
 {
        handle_t *handle;
        struct inode *inode = old_dentry->d_inode;
+       struct inode *old_dir = old_dentry->d_parent->d_inode;
        int err;
        struct buffer_head *fe_bh = NULL;
+       struct buffer_head *old_dir_bh = NULL;
        struct buffer_head *parent_fe_bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
@@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry,
 
        dquot_initialize(dir);
 
-       err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
+       err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+                       &parent_fe_bh, dir, 0);
        if (err < 0) {
                if (err != -ENOENT)
                        mlog_errno(err);
                return err;
        }
 
+       /* make sure both dirs have bhs
+        * get an extra ref on old_dir_bh if old==new */
+       if (!parent_fe_bh) {
+               if (old_dir_bh) {
+                       parent_fe_bh = old_dir_bh;
+                       get_bh(parent_fe_bh);
+               } else {
+                       mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
+                       err = -EIO;
+                       goto out;
+               }
+       }
+
        if (!dir->i_nlink) {
                err = -ENOENT;
                goto out;
        }
 
-       err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+       err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
                        old_dentry->d_name.len, &old_de_ino);
        if (err) {
                err = -ENOENT;
@@ -801,10 +825,11 @@ out_unlock_inode:
        ocfs2_inode_unlock(inode, 1);
 
 out:
-       ocfs2_inode_unlock(dir, 1);
+       ocfs2_double_unlock(old_dir, dir);
 
        brelse(fe_bh);
        brelse(parent_fe_bh);
+       brelse(old_dir_bh);
 
        ocfs2_free_dir_lookup_result(&lookup);
 
@@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
 }
 
 /*
- * The only place this should be used is rename!
+ * The only place this should be used is rename and link!
  * if they have the same id, then the 1st one is the only one locked.
  */
 static int ocfs2_double_lock(struct ocfs2_super *osb,
                             struct buffer_head **bh1,
                             struct inode *inode1,
                             struct buffer_head **bh2,
-                            struct inode *inode2)
+                            struct inode *inode2,
+                            int rename)
 {
        int status;
        int inode1_is_ancestor, inode2_is_ancestor;
@@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
                }
                /* lock id2 */
                status = ocfs2_inode_lock_nested(inode2, bh2, 1,
-                                                OI_LS_RENAME1);
+                               rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
                if (status < 0) {
                        if (status != -ENOENT)
                                mlog_errno(status);
@@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
        }
 
        /* lock id1 */
-       status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
+       status = ocfs2_inode_lock_nested(inode1, bh1, 1,
+                       rename == 1 ?  OI_LS_RENAME2 : OI_LS_PARENT);
        if (status < 0) {
                /*
                 * An error return must mean that no cluster locks
@@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir,
 
        /* if old and new are the same, this'll just do one lock. */
        status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
-                                  &new_dir_bh, new_dir);
+                                  &new_dir_bh, new_dir, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
index 8f0acef3d18481647507d32e1861420007f38e3d..69df5b239844f9395f38d2e3acb8f93f34142c0a 100644 (file)
@@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space)
 }
 
 /* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
        struct mem_dqblk *dm = &dquot->dq_dqb;
 
        memset(di, 0, sizeof(*di));
-       di->d_version = FS_DQUOT_VERSION;
-       di->d_flags = dquot->dq_id.type == USRQUOTA ?
-                       FS_USER_QUOTA : FS_GROUP_QUOTA;
-       di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
-
        spin_lock(&dq_data_lock);
-       di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
-       di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+       di->d_spc_hardlimit = dm->dqb_bhardlimit;
+       di->d_spc_softlimit = dm->dqb_bsoftlimit;
        di->d_ino_hardlimit = dm->dqb_ihardlimit;
        di->d_ino_softlimit = dm->dqb_isoftlimit;
-       di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
-       di->d_icount = dm->dqb_curinodes;
-       di->d_btimer = dm->dqb_btime;
-       di->d_itimer = dm->dqb_itime;
+       di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+       di->d_ino_count = dm->dqb_curinodes;
+       di->d_spc_timer = dm->dqb_btime;
+       di->d_ino_timer = dm->dqb_itime;
        spin_unlock(&dq_data_lock);
 }
 
 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
-                   struct fs_disk_quota *di)
+                   struct qc_dqblk *di)
 {
        struct dquot *dquot;
 
@@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
 }
 EXPORT_SYMBOL(dquot_get_dqblk);
 
-#define VFS_FS_DQ_MASK \
-       (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
-        FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
-        FS_DQ_BTIMER | FS_DQ_ITIMER)
+#define VFS_QC_MASK \
+       (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+        QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+        QC_SPC_TIMER | QC_INO_TIMER)
 
 /* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
        struct mem_dqblk *dm = &dquot->dq_dqb;
        int check_blim = 0, check_ilim = 0;
        struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
 
-       if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+       if (di->d_fieldmask & ~VFS_QC_MASK)
                return -EINVAL;
 
-       if (((di->d_fieldmask & FS_DQ_BSOFT) &&
-            (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
-           ((di->d_fieldmask & FS_DQ_BHARD) &&
-            (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
-           ((di->d_fieldmask & FS_DQ_ISOFT) &&
+       if (((di->d_fieldmask & QC_SPC_SOFT) &&
+            stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+           ((di->d_fieldmask & QC_SPC_HARD) &&
+            stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+           ((di->d_fieldmask & QC_INO_SOFT) &&
             (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
-           ((di->d_fieldmask & FS_DQ_IHARD) &&
+           ((di->d_fieldmask & QC_INO_HARD) &&
             (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
                return -ERANGE;
 
        spin_lock(&dq_data_lock);
-       if (di->d_fieldmask & FS_DQ_BCOUNT) {
-               dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+       if (di->d_fieldmask & QC_SPACE) {
+               dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_BSOFT)
-               dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
-       if (di->d_fieldmask & FS_DQ_BHARD)
-               dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
-       if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+       if (di->d_fieldmask & QC_SPC_SOFT)
+               dm->dqb_bsoftlimit = di->d_spc_softlimit;
+       if (di->d_fieldmask & QC_SPC_HARD)
+               dm->dqb_bhardlimit = di->d_spc_hardlimit;
+       if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_ICOUNT) {
-               dm->dqb_curinodes = di->d_icount;
+       if (di->d_fieldmask & QC_INO_COUNT) {
+               dm->dqb_curinodes = di->d_ino_count;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_ISOFT)
+       if (di->d_fieldmask & QC_INO_SOFT)
                dm->dqb_isoftlimit = di->d_ino_softlimit;
-       if (di->d_fieldmask & FS_DQ_IHARD)
+       if (di->d_fieldmask & QC_INO_HARD)
                dm->dqb_ihardlimit = di->d_ino_hardlimit;
-       if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+       if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_BTIMER) {
-               dm->dqb_btime = di->d_btimer;
+       if (di->d_fieldmask & QC_SPC_TIMER) {
+               dm->dqb_btime = di->d_spc_timer;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_ITIMER) {
-               dm->dqb_itime = di->d_itimer;
+       if (di->d_fieldmask & QC_INO_TIMER) {
+               dm->dqb_itime = di->d_ino_timer;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        }
@@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
                    dm->dqb_curspace < dm->dqb_bsoftlimit) {
                        dm->dqb_btime = 0;
                        clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-               } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+               } else if (!(di->d_fieldmask & QC_SPC_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
        }
@@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
                    dm->dqb_curinodes < dm->dqb_isoftlimit) {
                        dm->dqb_itime = 0;
                        clear_bit(DQ_INODES_B, &dquot->dq_flags);
-               } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+               } else if (!(di->d_fieldmask & QC_INO_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
        }
@@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 }
 
 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
-                 struct fs_disk_quota *di)
+                 struct qc_dqblk *di)
 {
        struct dquot *dquot;
        int rc;
index 2aa4151f99d2e5e9183adfb34e2c31da24e2faa2..6f3856328eeabd4bbc57b09ca9c8fe01a9827c9c 100644 (file)
@@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
        return sb->s_qcop->set_info(sb, type, &info);
 }
 
-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+static inline qsize_t qbtos(qsize_t blocks)
+{
+       return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+       return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
 {
        memset(dst, 0, sizeof(*dst));
-       dst->dqb_bhardlimit = src->d_blk_hardlimit;
-       dst->dqb_bsoftlimit = src->d_blk_softlimit;
-       dst->dqb_curspace = src->d_bcount;
+       dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+       dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+       dst->dqb_curspace = src->d_space;
        dst->dqb_ihardlimit = src->d_ino_hardlimit;
        dst->dqb_isoftlimit = src->d_ino_softlimit;
-       dst->dqb_curinodes = src->d_icount;
-       dst->dqb_btime = src->d_btimer;
-       dst->dqb_itime = src->d_itimer;
+       dst->dqb_curinodes = src->d_ino_count;
+       dst->dqb_btime = src->d_spc_timer;
+       dst->dqb_itime = src->d_ino_timer;
        dst->dqb_valid = QIF_ALL;
 }
 
@@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
 {
        struct kqid qid;
-       struct fs_disk_quota fdq;
+       struct qc_dqblk fdq;
        struct if_dqblk idq;
        int ret;
 
@@ -154,36 +164,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
        return 0;
 }
 
-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
 {
-       dst->d_blk_hardlimit = src->dqb_bhardlimit;
-       dst->d_blk_softlimit  = src->dqb_bsoftlimit;
-       dst->d_bcount = src->dqb_curspace;
+       dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+       dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+       dst->d_space = src->dqb_curspace;
        dst->d_ino_hardlimit = src->dqb_ihardlimit;
        dst->d_ino_softlimit = src->dqb_isoftlimit;
-       dst->d_icount = src->dqb_curinodes;
-       dst->d_btimer = src->dqb_btime;
-       dst->d_itimer = src->dqb_itime;
+       dst->d_ino_count = src->dqb_curinodes;
+       dst->d_spc_timer = src->dqb_btime;
+       dst->d_ino_timer = src->dqb_itime;
 
        dst->d_fieldmask = 0;
        if (src->dqb_valid & QIF_BLIMITS)
-               dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+               dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
        if (src->dqb_valid & QIF_SPACE)
-               dst->d_fieldmask |= FS_DQ_BCOUNT;
+               dst->d_fieldmask |= QC_SPACE;
        if (src->dqb_valid & QIF_ILIMITS)
-               dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+               dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
        if (src->dqb_valid & QIF_INODES)
-               dst->d_fieldmask |= FS_DQ_ICOUNT;
+               dst->d_fieldmask |= QC_INO_COUNT;
        if (src->dqb_valid & QIF_BTIME)
-               dst->d_fieldmask |= FS_DQ_BTIMER;
+               dst->d_fieldmask |= QC_SPC_TIMER;
        if (src->dqb_valid & QIF_ITIME)
-               dst->d_fieldmask |= FS_DQ_ITIMER;
+               dst->d_fieldmask |= QC_INO_TIMER;
 }
 
 static int quota_setquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
 {
-       struct fs_disk_quota fdq;
+       struct qc_dqblk fdq;
        struct if_dqblk idq;
        struct kqid qid;
 
@@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
        return ret;
 }
 
+/*
+ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+ * out of there as xfsprogs rely on definitions being in that header file. So
+ * just define same functions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+       return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+       return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+       dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+       dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+       dst->d_ino_hardlimit = src->d_ino_hardlimit;
+       dst->d_ino_softlimit = src->d_ino_softlimit;
+       dst->d_space = quota_bbtob(src->d_bcount);
+       dst->d_ino_count = src->d_icount;
+       dst->d_ino_timer = src->d_itimer;
+       dst->d_spc_timer = src->d_btimer;
+       dst->d_ino_warns = src->d_iwarns;
+       dst->d_spc_warns = src->d_bwarns;
+       dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+       dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+       dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+       dst->d_rt_spc_timer = src->d_rtbtimer;
+       dst->d_rt_spc_warns = src->d_rtbwarns;
+       dst->d_fieldmask = 0;
+       if (src->d_fieldmask & FS_DQ_ISOFT)
+               dst->d_fieldmask |= QC_INO_SOFT;
+       if (src->d_fieldmask & FS_DQ_IHARD)
+               dst->d_fieldmask |= QC_INO_HARD;
+       if (src->d_fieldmask & FS_DQ_BSOFT)
+               dst->d_fieldmask |= QC_SPC_SOFT;
+       if (src->d_fieldmask & FS_DQ_BHARD)
+               dst->d_fieldmask |= QC_SPC_HARD;
+       if (src->d_fieldmask & FS_DQ_RTBSOFT)
+               dst->d_fieldmask |= QC_RT_SPC_SOFT;
+       if (src->d_fieldmask & FS_DQ_RTBHARD)
+               dst->d_fieldmask |= QC_RT_SPC_HARD;
+       if (src->d_fieldmask & FS_DQ_BTIMER)
+               dst->d_fieldmask |= QC_SPC_TIMER;
+       if (src->d_fieldmask & FS_DQ_ITIMER)
+               dst->d_fieldmask |= QC_INO_TIMER;
+       if (src->d_fieldmask & FS_DQ_RTBTIMER)
+               dst->d_fieldmask |= QC_RT_SPC_TIMER;
+       if (src->d_fieldmask & FS_DQ_BWARNS)
+               dst->d_fieldmask |= QC_SPC_WARNS;
+       if (src->d_fieldmask & FS_DQ_IWARNS)
+               dst->d_fieldmask |= QC_INO_WARNS;
+       if (src->d_fieldmask & FS_DQ_RTBWARNS)
+               dst->d_fieldmask |= QC_RT_SPC_WARNS;
+       if (src->d_fieldmask & FS_DQ_BCOUNT)
+               dst->d_fieldmask |= QC_SPACE;
+       if (src->d_fieldmask & FS_DQ_ICOUNT)
+               dst->d_fieldmask |= QC_INO_COUNT;
+       if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+               dst->d_fieldmask |= QC_RT_SPACE;
+}
+
 static int quota_setxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
 {
        struct fs_disk_quota fdq;
+       struct qc_dqblk qdq;
        struct kqid qid;
 
        if (copy_from_user(&fdq, addr, sizeof(fdq)))
@@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_valid(qid))
                return -EINVAL;
-       return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+       copy_from_xfs_dqblk(&qdq, &fdq);
+       return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+                             int type, qid_t id)
+{
+       memset(dst, 0, sizeof(*dst));
+       dst->d_version = FS_DQUOT_VERSION;
+       dst->d_id = id;
+       if (type == USRQUOTA)
+               dst->d_flags = FS_USER_QUOTA;
+       else if (type == PRJQUOTA)
+               dst->d_flags = FS_PROJ_QUOTA;
+       else
+               dst->d_flags = FS_GROUP_QUOTA;
+       dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+       dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+       dst->d_ino_hardlimit = src->d_ino_hardlimit;
+       dst->d_ino_softlimit = src->d_ino_softlimit;
+       dst->d_bcount = quota_btobb(src->d_space);
+       dst->d_icount = src->d_ino_count;
+       dst->d_itimer = src->d_ino_timer;
+       dst->d_btimer = src->d_spc_timer;
+       dst->d_iwarns = src->d_ino_warns;
+       dst->d_bwarns = src->d_spc_warns;
+       dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+       dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+       dst->d_rtbcount = quota_btobb(src->d_rt_space);
+       dst->d_rtbtimer = src->d_rt_spc_timer;
+       dst->d_rtbwarns = src->d_rt_spc_warns;
 }
 
 static int quota_getxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
 {
        struct fs_disk_quota fdq;
+       struct qc_dqblk qdq;
        struct kqid qid;
        int ret;
 
@@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_valid(qid))
                return -EINVAL;
-       ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
-       if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
+       ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
+       if (ret)
+               return ret;
+       copy_to_xfs_dqblk(&fdq, &qdq, type, id);
+       if (copy_to_user(addr, &fdq, sizeof(fdq)))
                return -EFAULT;
        return ret;
 }
index bb15771b92ae32ae02390179492647a1d87c9dc2..08f3555fbeac3f6ceeda033cb8f6ec82557623d0 100644 (file)
@@ -224,7 +224,7 @@ out:
 static int udf_release_file(struct inode *inode, struct file *filp)
 {
        if (filp->f_mode & FMODE_WRITE &&
-           atomic_read(&inode->i_writecount) > 1) {
+           atomic_read(&inode->i_writecount) == 1) {
                /*
                 * Grab i_mutex to avoid races with writes changing i_size
                 * while we are running.
index 3a07a937e232a7e51bf089f981a664f782f5a75f..41f6c0b9d51cd3dc3d8b22b2fe37fcf78cd064bf 100644 (file)
@@ -166,9 +166,9 @@ extern void         xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
 /* quota ops */
 extern int             xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
 extern int             xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
-                                       uint, struct fs_disk_quota *);
+                                       uint, struct qc_dqblk *);
 extern int             xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
-                                       struct fs_disk_quota *);
+                                       struct qc_dqblk *);
 extern int             xfs_qm_scall_getqstat(struct xfs_mount *,
                                        struct fs_quota_stat *);
 extern int             xfs_qm_scall_getqstatv(struct xfs_mount *,
index 74fca68e43b6b4e98a0d203204d4ce2f646ff8fa..cb6168ec92c9e0763640d5cc4eeff12fc89f6ef0 100644 (file)
@@ -39,7 +39,6 @@ STATIC int    xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
 STATIC int     xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
                                        uint);
 STATIC uint    xfs_qm_export_flags(uint);
-STATIC uint    xfs_qm_export_qtype_flags(uint);
 
 /*
  * Turn off quota accounting and/or enforcement for all udquots and/or
@@ -573,8 +572,8 @@ xfs_qm_scall_getqstatv(
        return 0;
 }
 
-#define XFS_DQ_MASK \
-       (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+#define XFS_QC_MASK \
+       (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
 
 /*
  * Adjust quota limits, and start/stop timers accordingly.
@@ -584,7 +583,7 @@ xfs_qm_scall_setqlim(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
-       fs_disk_quota_t         *newlim)
+       struct qc_dqblk         *newlim)
 {
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_disk_dquot   *ddq;
@@ -593,9 +592,9 @@ xfs_qm_scall_setqlim(
        int                     error;
        xfs_qcnt_t              hard, soft;
 
-       if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+       if (newlim->d_fieldmask & ~XFS_QC_MASK)
                return -EINVAL;
-       if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+       if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
                return 0;
 
        /*
@@ -633,11 +632,11 @@ xfs_qm_scall_setqlim(
        /*
         * Make sure that hardlimits are >= soft limits before changing.
         */
-       hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
+       hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
                        be64_to_cpu(ddq->d_blk_hardlimit);
-       soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
+       soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
                        be64_to_cpu(ddq->d_blk_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_blk_hardlimit = cpu_to_be64(hard);
@@ -650,11 +649,11 @@ xfs_qm_scall_setqlim(
        } else {
                xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
        }
-       hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
+       hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
                        be64_to_cpu(ddq->d_rtb_hardlimit);
-       soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
+       soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
                        be64_to_cpu(ddq->d_rtb_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_rtb_hardlimit = cpu_to_be64(hard);
@@ -667,10 +666,10 @@ xfs_qm_scall_setqlim(
                xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
        }
 
-       hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
+       hard = (newlim->d_fieldmask & QC_INO_HARD) ?
                (xfs_qcnt_t) newlim->d_ino_hardlimit :
                        be64_to_cpu(ddq->d_ino_hardlimit);
-       soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
+       soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
                (xfs_qcnt_t) newlim->d_ino_softlimit :
                        be64_to_cpu(ddq->d_ino_softlimit);
        if (hard == 0 || hard >= soft) {
@@ -687,12 +686,12 @@ xfs_qm_scall_setqlim(
        /*
         * Update warnings counter(s) if requested
         */
-       if (newlim->d_fieldmask & FS_DQ_BWARNS)
-               ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
-       if (newlim->d_fieldmask & FS_DQ_IWARNS)
-               ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
-       if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
-               ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
+       if (newlim->d_fieldmask & QC_SPC_WARNS)
+               ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
+       if (newlim->d_fieldmask & QC_INO_WARNS)
+               ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
+       if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+               ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
 
        if (id == 0) {
                /*
@@ -702,24 +701,24 @@ xfs_qm_scall_setqlim(
                 * soft and hard limit values (already done, above), and
                 * for warnings.
                 */
-               if (newlim->d_fieldmask & FS_DQ_BTIMER) {
-                       q->qi_btimelimit = newlim->d_btimer;
-                       ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
+               if (newlim->d_fieldmask & QC_SPC_TIMER) {
+                       q->qi_btimelimit = newlim->d_spc_timer;
+                       ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
                }
-               if (newlim->d_fieldmask & FS_DQ_ITIMER) {
-                       q->qi_itimelimit = newlim->d_itimer;
-                       ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
+               if (newlim->d_fieldmask & QC_INO_TIMER) {
+                       q->qi_itimelimit = newlim->d_ino_timer;
+                       ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
                }
-               if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
-                       q->qi_rtbtimelimit = newlim->d_rtbtimer;
-                       ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
+               if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
+                       q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
+                       ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
                }
-               if (newlim->d_fieldmask & FS_DQ_BWARNS)
-                       q->qi_bwarnlimit = newlim->d_bwarns;
-               if (newlim->d_fieldmask & FS_DQ_IWARNS)
-                       q->qi_iwarnlimit = newlim->d_iwarns;
-               if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
-                       q->qi_rtbwarnlimit = newlim->d_rtbwarns;
+               if (newlim->d_fieldmask & QC_SPC_WARNS)
+                       q->qi_bwarnlimit = newlim->d_spc_warns;
+               if (newlim->d_fieldmask & QC_INO_WARNS)
+                       q->qi_iwarnlimit = newlim->d_ino_warns;
+               if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+                       q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
        } else {
                /*
                 * If the user is now over quota, start the timelimit.
@@ -824,7 +823,7 @@ xfs_qm_scall_getquota(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
-       struct fs_disk_quota    *dst)
+       struct qc_dqblk         *dst)
 {
        struct xfs_dquot        *dqp;
        int                     error;
@@ -848,28 +847,25 @@ xfs_qm_scall_getquota(
        }
 
        memset(dst, 0, sizeof(*dst));
-       dst->d_version = FS_DQUOT_VERSION;
-       dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
-       dst->d_id = be32_to_cpu(dqp->q_core.d_id);
-       dst->d_blk_hardlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
-       dst->d_blk_softlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+       dst->d_spc_hardlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
+       dst->d_spc_softlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
        dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
        dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
-       dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
-       dst->d_icount = dqp->q_res_icount;
-       dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
-       dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
-       dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
-       dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
-       dst->d_rtb_hardlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
-       dst->d_rtb_softlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
-       dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
-       dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
-       dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
+       dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
+       dst->d_ino_count = dqp->q_res_icount;
+       dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
+       dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
+       dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
+       dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
+       dst->d_rt_spc_hardlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
+       dst->d_rt_spc_softlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+       dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
+       dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+       dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
 
        /*
         * Internally, we don't reset all the timers when quota enforcement
@@ -882,23 +878,23 @@ xfs_qm_scall_getquota(
             dqp->q_core.d_flags == XFS_DQ_GROUP) ||
            (!XFS_IS_PQUOTA_ENFORCED(mp) &&
             dqp->q_core.d_flags == XFS_DQ_PROJ)) {
-               dst->d_btimer = 0;
-               dst->d_itimer = 0;
-               dst->d_rtbtimer = 0;
+               dst->d_spc_timer = 0;
+               dst->d_ino_timer = 0;
+               dst->d_rt_spc_timer = 0;
        }
 
 #ifdef DEBUG
-       if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
-            (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
-            (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
-           dst->d_id != 0) {
-               if ((dst->d_bcount > dst->d_blk_softlimit) &&
-                   (dst->d_blk_softlimit > 0)) {
-                       ASSERT(dst->d_btimer != 0);
+       if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
+            (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
+            (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
+           id != 0) {
+               if ((dst->d_space > dst->d_spc_softlimit) &&
+                   (dst->d_spc_softlimit > 0)) {
+                       ASSERT(dst->d_spc_timer != 0);
                }
-               if ((dst->d_icount > dst->d_ino_softlimit) &&
+               if ((dst->d_ino_count > dst->d_ino_softlimit) &&
                    (dst->d_ino_softlimit > 0)) {
-                       ASSERT(dst->d_itimer != 0);
+                       ASSERT(dst->d_ino_timer != 0);
                }
        }
 #endif
@@ -907,26 +903,6 @@ out_put:
        return error;
 }
 
-STATIC uint
-xfs_qm_export_qtype_flags(
-       uint flags)
-{
-       /*
-        * Can't be more than one, or none.
-        */
-       ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
-               (FS_PROJ_QUOTA | FS_USER_QUOTA));
-       ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
-               (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
-       ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
-               (FS_USER_QUOTA | FS_GROUP_QUOTA));
-       ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
-
-       return (flags & XFS_DQ_USER) ?
-               FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
-                       FS_PROJ_QUOTA : FS_GROUP_QUOTA;
-}
-
 STATIC uint
 xfs_qm_export_flags(
        uint flags)
index 7542bbeca6a12b18ae1162a51a8dd97f6c35790e..801a84c1cdc3c76d86ae413cf4a1dfddd89cdc92 100644 (file)
@@ -131,7 +131,7 @@ STATIC int
 xfs_fs_get_dqblk(
        struct super_block      *sb,
        struct kqid             qid,
-       struct fs_disk_quota    *fdq)
+       struct qc_dqblk         *qdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
 
@@ -141,14 +141,14 @@ xfs_fs_get_dqblk(
                return -ESRCH;
 
        return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
-                                     xfs_quota_type(qid.type), fdq);
+                                     xfs_quota_type(qid.type), qdq);
 }
 
 STATIC int
 xfs_fs_set_dqblk(
        struct super_block      *sb,
        struct kqid             qid,
-       struct fs_disk_quota    *fdq)
+       struct qc_dqblk         *qdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
 
@@ -160,7 +160,7 @@ xfs_fs_set_dqblk(
                return -ESRCH;
 
        return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
-                                    xfs_quota_type(qid.type), fdq);
+                                    xfs_quota_type(qid.type), qdq);
 }
 
 const struct quotactl_ops xfs_quotactl_operations = {
index 3ca9b751f1224cfd9a6816ebebe38d7559c7bcd0..b95dc32a6e6b61aefac9fefce83c6a3da20fa111 100644 (file)
@@ -196,8 +196,8 @@ struct acpi_processor_flags {
 struct acpi_processor {
        acpi_handle handle;
        u32 acpi_id;
-       u32 apic_id;
-       u32 id;
+       u32 phys_id;    /* CPU hardware ID such as APIC ID for x86 */
+       u32 id;         /* CPU logical ID allocated by OS */
        u32 pblk;
        int performance_platform_limit;
        int throttling_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 #endif                         /* CONFIG_CPU_FREQ */
 
 /* in processor_core.c */
-int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int apic_id, u32 acpi_id);
+int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int phys_id, u32 acpi_id);
 int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
 
 /* in processor_pdc.c */
index 08848050922e613f28a1d8f7e4b3ad4c34b463be..db284bff29dcceb39360d458cec3a194745955f8 100644 (file)
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
 {
-       tlb->start = TASK_SIZE;
-       tlb->end = 0;
+       if (tlb->fullmm) {
+               tlb->start = tlb->end = ~0;
+       } else {
+               tlb->start = TASK_SIZE;
+               tlb->end = 0;
+       }
 }
 
 /*
index cd62bf4289e9573301b25c5559ac91b412883dc6..88ea64e9a91ce5df933a84ff8cfdc4ec3e66b5d0 100644 (file)
@@ -67,8 +67,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
 int af_alg_release(struct socket *sock);
 int af_alg_accept(struct sock *sk, struct socket *newsock);
 
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
-                  int write);
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
 void af_alg_free_sg(struct af_alg_sgl *sgl);
 
 int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
index 1ea1b702fec2c49f5ca8d7443df6933ec88da175..d4110d5caa3efde4e0b44402ed8d4d8a00aed6cc 100644 (file)
@@ -7,14 +7,14 @@
 
 #include <dt-bindings/interrupt-controller/irq.h>
 
-/* interrupt specific cell 0 */
+/* interrupt specifier cell 0 */
 
 #define GIC_SPI 0
 #define GIC_PPI 1
 
 /*
  * Interrupt specifier cell 2.
- * The flaggs in irq.h are valid, plus those below.
+ * The flags in irq.h are valid, plus those below.
  */
 #define GIC_CPU_MASK_RAW(x) ((x) << 8)
 #define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1)
index 856d381b1d5b83ce923be0f5d7da80ba420c1d14..d459cd17b477600cadf54ad2a227b66c5638112f 100644 (file)
@@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
-int acpi_unmap_lsapic(int cpu);
+int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_unmap_cpu(int cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
index 8aded9ab2e4e89ddb66e5920c0819bade6fb0760..5735e7130d630f94fe62d3e0a7a7078434c8ee12 100644 (file)
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
        unsigned long           flags;          /* BLK_MQ_F_* flags */
 
        struct request_queue    *queue;
-       unsigned int            queue_num;
        struct blk_flush_queue  *fq;
 
        void                    *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
        unsigned long           dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
        unsigned int            numa_node;
-       unsigned int            cmd_size;       /* per-request extra data */
+       unsigned int            queue_num;
 
        atomic_t                nr_active;
 
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
+int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
 void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
                void *priv);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
 
 /*
  * Driver command data is immediately after the request. So subtract request
index 445d59231bc4cc242e2aa0e1ca073e3c320af56c..c294e3e25e37a50a953a4a0bb3cb1aa66ba904d9 100644 (file)
@@ -190,6 +190,7 @@ enum rq_flag_bits {
        __REQ_PM,               /* runtime pm request */
        __REQ_HASHED,           /* on IO scheduler merge hash */
        __REQ_MQ_INFLIGHT,      /* track inflight for MQ */
+       __REQ_NO_TIMEOUT,       /* requests may never expire */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -243,5 +244,6 @@ enum rq_flag_bits {
 #define REQ_PM                 (1ULL << __REQ_PM)
 #define REQ_HASHED             (1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT                (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT         (1ULL << __REQ_NO_TIMEOUT)
 
 #endif /* __LINUX_BLK_TYPES_H */
index 5d86416d35f2223da40fae9e9fd8e312d365026b..61b19c46bdb33d5fc2f4752df0c345350fa739e8 100644 (file)
@@ -87,8 +87,8 @@ struct ceph_osd_req_op {
                        struct ceph_osd_data osd_data;
                } extent;
                struct {
-                       __le32 name_len;
-                       __le32 value_len;
+                       u32 name_len;
+                       u32 value_len;
                        __u8 cmp_op;       /* CEPH_OSD_CMPXATTR_OP_* */
                        __u8 cmp_mode;     /* CEPH_OSD_CMPXATTR_MODE_* */
                        struct ceph_osd_data osd_data;
index a1c81f80978ee4b38bbbb1cdd781c7afa0362c5e..33063f872ee3cd698a233a0f0defa67aa1b6bd01 100644 (file)
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
        }
 }
 
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  * compiler is aware of some particular ordering.  One way to make the
  * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
  *
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE()  will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
  * compile-time warning.
  *
  * Their two major use cases are: (1) Mediating communication between
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 #define READ_ONCE(x) \
        ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
 
-#define ASSIGN_ONCE(val, x) \
-       ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+#define WRITE_ONCE(x, val) \
+       ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
 
index f90c0282c11493f94a84095f61db94ac90974cc4..42efe13077b6c1b8dd139c6cea7a241e5d6b320d 100644 (file)
@@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define FMODE_CAN_WRITE         ((__force fmode_t)0x40000)
 
 /* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY         ((__force fmode_t)0x1000000)
+#define FMODE_NONOTIFY         ((__force fmode_t)0x4000000)
 
 /*
  * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
index 55b685719d522da32a37ba59013d1f029ff6a37e..09460d6d668208e3e4997b42f6dca05d0ef917a8 100644 (file)
@@ -11,6 +11,10 @@ extern void genl_unlock(void);
 extern int lockdep_genl_is_held(void);
 #endif
 
+/* for synchronisation between af_netlink and genetlink */
+extern atomic_t genl_sk_destructing_cnt;
+extern wait_queue_head_t genl_sk_destructing_waitq;
+
 /**
  * rcu_dereference_genl - rcu_dereference with debug checking
  * @p: The pointer to read, prior to dereferencing
index e3a1721c8354b98d6c613d536e4704ff52fa18d4..7c7695940dddeae9d3d22129ce4a14eaf70e1a5e 100644 (file)
@@ -228,7 +228,9 @@ struct i2c_client {
        struct device dev;              /* the device structure         */
        int irq;                        /* irq issued by device         */
        struct list_head detected;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
        i2c_slave_cb_t slave_cb;        /* callback for slave mode      */
+#endif
 };
 #define to_i2c_client(d) container_of(d, struct i2c_client, dev)
 
@@ -253,6 +255,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
 
 /* I2C slave support */
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 enum i2c_slave_event {
        I2C_SLAVE_REQ_READ_START,
        I2C_SLAVE_REQ_READ_END,
@@ -269,6 +272,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
 {
        return client->slave_cb(client, event, val);
 }
+#endif
 
 /**
  * struct i2c_board_info - template for device creation
@@ -404,8 +408,10 @@ struct i2c_algorithm {
        /* To determine what the adapter supports */
        u32 (*functionality) (struct i2c_adapter *);
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
        int (*reg_slave)(struct i2c_client *client);
        int (*unreg_slave)(struct i2c_client *client);
+#endif
 };
 
 /**
index 4f4eea8a62882914a86c11a084928a02225c6e0b..b9c7897dc5668c3fe59c30ab8505d334a6689168 100644 (file)
@@ -1017,6 +1017,15 @@ struct ieee80211_mmie {
        u8 mic[8];
 } __packed;
 
+/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */
+struct ieee80211_mmie_16 {
+       u8 element_id;
+       u8 length;
+       __le16 key_id;
+       u8 sequence_number[6];
+       u8 mic[16];
+} __packed;
+
 struct ieee80211_vendor_ie {
        u8 element_id;
        u8 len;
@@ -1994,9 +2003,15 @@ enum ieee80211_key_len {
        WLAN_KEY_LEN_WEP40 = 5,
        WLAN_KEY_LEN_WEP104 = 13,
        WLAN_KEY_LEN_CCMP = 16,
+       WLAN_KEY_LEN_CCMP_256 = 32,
        WLAN_KEY_LEN_TKIP = 32,
        WLAN_KEY_LEN_AES_CMAC = 16,
        WLAN_KEY_LEN_SMS4 = 32,
+       WLAN_KEY_LEN_GCMP = 16,
+       WLAN_KEY_LEN_GCMP_256 = 32,
+       WLAN_KEY_LEN_BIP_CMAC_256 = 32,
+       WLAN_KEY_LEN_BIP_GMAC_128 = 16,
+       WLAN_KEY_LEN_BIP_GMAC_256 = 32,
 };
 
 #define IEEE80211_WEP_IV_LEN           4
@@ -2004,9 +2019,16 @@ enum ieee80211_key_len {
 #define IEEE80211_CCMP_HDR_LEN         8
 #define IEEE80211_CCMP_MIC_LEN         8
 #define IEEE80211_CCMP_PN_LEN          6
+#define IEEE80211_CCMP_256_HDR_LEN     8
+#define IEEE80211_CCMP_256_MIC_LEN     16
+#define IEEE80211_CCMP_256_PN_LEN      6
 #define IEEE80211_TKIP_IV_LEN          8
 #define IEEE80211_TKIP_ICV_LEN         4
 #define IEEE80211_CMAC_PN_LEN          6
+#define IEEE80211_GMAC_PN_LEN          6
+#define IEEE80211_GCMP_HDR_LEN         8
+#define IEEE80211_GCMP_MIC_LEN         16
+#define IEEE80211_GCMP_PN_LEN          6
 
 /* Public action codes */
 enum ieee80211_pub_actioncode {
@@ -2230,6 +2252,11 @@ enum ieee80211_sa_query_action {
 #define WLAN_CIPHER_SUITE_WEP104       0x000FAC05
 #define WLAN_CIPHER_SUITE_AES_CMAC     0x000FAC06
 #define WLAN_CIPHER_SUITE_GCMP         0x000FAC08
+#define WLAN_CIPHER_SUITE_GCMP_256     0x000FAC09
+#define WLAN_CIPHER_SUITE_CCMP_256     0x000FAC0A
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
 
 #define WLAN_CIPHER_SUITE_SMS4         0x00147201
 
index 0a8ce762a47fded2031d83ec3535feaed062806a..a57bca2ea97e51058283dfa9305aff9f9244b12e 100644 (file)
@@ -50,24 +50,6 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use
 typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
-#if IS_ENABLED(CONFIG_BRIDGE)
-int br_fdb_external_learn_add(struct net_device *dev,
-                             const unsigned char *addr, u16 vid);
-int br_fdb_external_learn_del(struct net_device *dev,
-                             const unsigned char *addr, u16 vid);
-#else
-static inline int br_fdb_external_learn_add(struct net_device *dev,
-                                           const unsigned char *addr, u16 vid)
-{
-       return 0;
-}
-static inline int br_fdb_external_learn_del(struct net_device *dev,
-                                           const unsigned char *addr, u16 vid)
-{
-       return 0;
-}
-#endif
-
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
 int br_multicast_list_adjacent(struct net_device *dev,
                               struct list_head *br_ip_list);
index 515a35e2a48ab7e55d550fcd164466080773b3ce..b11b28a30b9ee78e1f600cc076736a72dc624c72 100644 (file)
@@ -78,9 +78,9 @@ static inline bool is_vlan_dev(struct net_device *dev)
         return dev->priv_flags & IFF_802_1Q_VLAN;
 }
 
-#define vlan_tx_tag_present(__skb)     ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get(__skb)         ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get_id(__skb)      ((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_present(__skb)    ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get(__skb)                ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get_id(__skb)     ((__skb)->vlan_tci & VLAN_VID_MASK)
 
 /**
  *     struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -376,7 +376,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
 static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
 {
        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
-                                       vlan_tx_tag_get(skb));
+                                       skb_vlan_tag_get(skb));
        if (likely(skb))
                skb->vlan_tci = 0;
        return skb;
@@ -393,7 +393,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
  */
 static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
 {
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                skb = __vlan_hwaccel_push_inside(skb);
        return skb;
 }
@@ -442,8 +442,8 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
                                         u16 *vlan_tci)
 {
-       if (vlan_tx_tag_present(skb)) {
-               *vlan_tci = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               *vlan_tci = skb_vlan_tag_get(skb);
                return 0;
        } else {
                *vlan_tci = 0;
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 /**
  * vlan_get_protocol - get protocol EtherType.
  * @skb: skbuff to query
+ * @type: first vlan protocol
+ * @depth: buffer to store length of eth and vlan tags in bytes
  *
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+                                        int *depth)
 {
-       __be16 protocol = 0;
-
-       if (vlan_tx_tag_present(skb) ||
-            skb->protocol != cpu_to_be16(ETH_P_8021Q))
-               protocol = skb->protocol;
-       else {
-               __be16 proto, *protop;
-               protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
-                                               h_vlan_encapsulated_proto),
-                                               sizeof(proto), &proto);
-               if (likely(protop))
-                       protocol = *protop;
+       unsigned int vlan_depth = skb->mac_len;
+
+       /* if type is 802.1Q/AD then the header should already be
+        * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+        * ETH_HLEN otherwise
+        */
+       if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+               if (vlan_depth) {
+                       if (WARN_ON(vlan_depth < VLAN_HLEN))
+                               return 0;
+                       vlan_depth -= VLAN_HLEN;
+               } else {
+                       vlan_depth = ETH_HLEN;
+               }
+               do {
+                       struct vlan_hdr *vh;
+
+                       if (unlikely(!pskb_may_pull(skb,
+                                                   vlan_depth + VLAN_HLEN)))
+                               return 0;
+
+                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+                       type = vh->h_vlan_encapsulated_proto;
+                       vlan_depth += VLAN_HLEN;
+               } while (type == htons(ETH_P_8021Q) ||
+                        type == htons(ETH_P_8021AD));
        }
 
-       return protocol;
+       if (depth)
+               *depth = vlan_depth;
+
+       return type;
+}
+
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+{
+       return __vlan_get_protocol(skb, skb->protocol, NULL);
 }
 
 static inline void vlan_set_encap_proto(struct sk_buff *skb,
index c694e7baa621b3f76d220589e018dd767fa7bfd0..4d5169f5d7d1844d99d2af6c8d2cb89ad9879bdf 100644 (file)
@@ -52,6 +52,7 @@ struct ipv6_devconf {
        __s32           force_tllao;
        __s32           ndisc_notify;
        __s32           suppress_frag_ndisc;
+       __s32           accept_ra_mtu;
        void            *sysctl;
 };
 
@@ -124,6 +125,12 @@ struct ipv6_mc_socklist;
 struct ipv6_ac_socklist;
 struct ipv6_fl_socklist;
 
+struct inet6_cork {
+       struct ipv6_txoptions *opt;
+       u8 hop_limit;
+       u8 tclass;
+};
+
 /**
  * struct ipv6_pinfo - ipv6 private area
  *
@@ -216,11 +223,7 @@ struct ipv6_pinfo {
        struct ipv6_txoptions   *opt;
        struct sk_buff          *pktoptions;
        struct sk_buff          *rxpmtu;
-       struct {
-               struct ipv6_txoptions *opt;
-               u8 hop_limit;
-               u8 tclass;
-       } cork;
+       struct inet6_cork       cork;
 };
 
 /* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
index 290db1269c4c7970ab016165c62d93eec53059b4..75ae2e2631fceaa27915f3d100b1f03244b17500 100644 (file)
  * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
  */
 
+/* Shifted versions of the command enable bits are be used if the command
+ * has no arguments (see kdb_check_flags). This allows commands, such as
+ * go, to have different permissions depending upon whether it is called
+ * with an argument.
+ */
+#define KDB_ENABLE_NO_ARGS_SHIFT 10
+
 typedef enum {
-       KDB_REPEAT_NONE = 0,    /* Do not repeat this command */
-       KDB_REPEAT_NO_ARGS,     /* Repeat the command without arguments */
-       KDB_REPEAT_WITH_ARGS,   /* Repeat the command including its arguments */
-} kdb_repeat_t;
+       KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
+       KDB_ENABLE_MEM_READ = (1 << 1),
+       KDB_ENABLE_MEM_WRITE = (1 << 2),
+       KDB_ENABLE_REG_READ = (1 << 3),
+       KDB_ENABLE_REG_WRITE = (1 << 4),
+       KDB_ENABLE_INSPECT = (1 << 5),
+       KDB_ENABLE_FLOW_CTRL = (1 << 6),
+       KDB_ENABLE_SIGNAL = (1 << 7),
+       KDB_ENABLE_REBOOT = (1 << 8),
+       /* User exposed values stop here, all remaining flags are
+        * exclusively used to describe a commands behaviour.
+        */
+
+       KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
+       KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
+
+       KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
+                                     << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
+                                      << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
+                                     << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
+                                      << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
+                                    << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
+                                      << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
+                                   << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
+                                   << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
+                                        << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
+
+       KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
+       KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
+} kdb_cmdflags_t;
 
 typedef int (*kdb_func_t)(int, const char **);
 
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
 #define KDB_BADLENGTH  (-19)
 #define KDB_NOBP       (-20)
 #define KDB_BADADDR    (-21)
+#define KDB_NOPERM     (-22)
 
 /*
  * kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
 
 /* Dynamic kdb shell command registration */
 extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
-                              short, kdb_repeat_t);
+extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
+                             short, kdb_cmdflags_t);
 extern int kdb_unregister(char *);
 #else /* ! CONFIG_KGDB_KDB */
 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
 static inline void kdb_init(int level) {}
 static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
                               char *help, short minlen) { return 0; }
-static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage,
-                                     char *help, short minlen,
-                                     kdb_repeat_t repeat) { return 0; }
+static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
+                                    char *help, short minlen,
+                                    kdb_cmdflags_t flags) { return 0; }
 static inline int kdb_unregister(char *cmd) { return 0; }
 #endif /* CONFIG_KGDB_KDB */
 enum {
index 5449d2f4a1efa51203ecd27237ddf9899af2d59a..64ce58bee6f5a74356f612a48730c35455ae6662 100644 (file)
@@ -176,7 +176,7 @@ extern int _cond_resched(void);
  */
 # define might_sleep() \
        do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
-# define sched_annotate_sleep()        __set_current_state(TASK_RUNNING)
+# define sched_annotate_sleep()        (current->task_state_change = 0)
 #else
   static inline void ___might_sleep(const char *file, int line,
                                   int preempt_offset) { }
index 2d182413b1db5bbf3b9afa2c15dd674d2b817b4e..91f705de2c0be743dac06dac565817516058fc56 100644 (file)
@@ -231,6 +231,7 @@ enum {
        ATA_FLAG_SW_ACTIVITY    = (1 << 22), /* driver supports sw activity
                                              * led */
        ATA_FLAG_NO_DIPM        = (1 << 23), /* host not happy with DIPM */
+       ATA_FLAG_LOWTAG         = (1 << 24), /* host wants lowest available tag */
 
        /* bits 24:31 of ap->flags are reserved for LLD specific flags */
 
@@ -422,6 +423,7 @@ enum {
        ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19),    /* don't use queued TRIM */
        ATA_HORKAGE_NOLPM       = (1 << 20),    /* don't use LPM */
        ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21),  /* some WDs have broken LPM */
+       ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
 
         /* DMA mask for user DMA control: User visible values; DO NOT
            renumber */
index 5d10ae364b5eb5d0a50d220938dd2f020a8cafb2..f266661d2666596d808bbe8756c91239f5b8e6dc 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef _LINUX_LIST_NULLS_H
 #define _LINUX_LIST_NULLS_H
 
+#include <linux/poison.h>
+#include <linux/const.h>
+
 /*
  * Special version of lists, where end of list is not a NULL pointer,
  * but a 'nulls' marker, which can have many different values.
@@ -21,8 +24,9 @@ struct hlist_nulls_head {
 struct hlist_nulls_node {
        struct hlist_nulls_node *next, **pprev;
 };
+#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
 #define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
-       ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1)))
+       ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
 
 #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
 /**
index ce5dda8958fe83af981a19669662fe808581c261..b1fd675fa36f36a7ec5b68d2ba9d76684ad3bcfd 100644 (file)
@@ -59,6 +59,7 @@ enum s2mps13_reg {
        S2MPS13_REG_B6CTRL,
        S2MPS13_REG_B6OUT,
        S2MPS13_REG_B7CTRL,
+       S2MPS13_REG_B7SW,
        S2MPS13_REG_B7OUT,
        S2MPS13_REG_B8CTRL,
        S2MPS13_REG_B8OUT,
@@ -102,6 +103,7 @@ enum s2mps13_reg {
        S2MPS13_REG_L26CTRL,
        S2MPS13_REG_L27CTRL,
        S2MPS13_REG_L28CTRL,
+       S2MPS13_REG_L29CTRL,
        S2MPS13_REG_L30CTRL,
        S2MPS13_REG_L31CTRL,
        S2MPS13_REG_L32CTRL,
index 575a86c7fcbd2d2b4168d25c3ee20c275e84fc0e..f742b6717d52af2d83aaebaa6d5ff22a72cf786b 100644 (file)
@@ -50,6 +50,8 @@ enum {
        STMPE_IDX_GPEDR_MSB,
        STMPE_IDX_GPRER_LSB,
        STMPE_IDX_GPFER_LSB,
+       STMPE_IDX_GPPUR_LSB,
+       STMPE_IDX_GPPDR_LSB,
        STMPE_IDX_GPAFR_U_MSB,
        STMPE_IDX_IEGPIOR_LSB,
        STMPE_IDX_ISGPIOR_LSB,
@@ -113,24 +115,6 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins,
 extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks);
 extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
 
-struct matrix_keymap_data;
-
-/**
- * struct stmpe_keypad_platform_data - STMPE keypad platform data
- * @keymap_data: key map table and size
- * @debounce_ms: debounce interval, in ms.  Maximum is
- *              %STMPE_KEYPAD_MAX_DEBOUNCE.
- * @scan_count: number of key scanning cycles to confirm key data.
- *             Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
- * @no_autorepeat: disable key autorepeat
- */
-struct stmpe_keypad_platform_data {
-       const struct matrix_keymap_data *keymap_data;
-       unsigned int debounce_ms;
-       unsigned int scan_count;
-       bool no_autorepeat;
-};
-
 #define STMPE_GPIO_NOREQ_811_TOUCH     (0xf0)
 
 /**
@@ -199,7 +183,6 @@ struct stmpe_ts_platform_data {
  * @irq_gpio: gpio number over which irq will be requested (significant only if
  *           irq_over_gpio is true)
  * @gpio: GPIO-specific platform data
- * @keypad: keypad-specific platform data
  * @ts: touchscreen-specific platform data
  */
 struct stmpe_platform_data {
@@ -212,7 +195,6 @@ struct stmpe_platform_data {
        int autosleep_timeout;
 
        struct stmpe_gpio_platform_data *gpio;
-       struct stmpe_keypad_platform_data *keypad;
        struct stmpe_ts_platform_data *ts;
 };
 
index 64d25941b329918e2c8f232e02431dabfcbc3c8f..7b6d4e9ff603828181239f5f64cbdf3a6d2cd282 100644 (file)
@@ -71,6 +71,7 @@ enum {
 
        /*master notify fw on finish for slave's flr*/
        MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+       MLX4_CMD_VIRT_PORT_MAP   = 0x5c,
        MLX4_CMD_GET_OP_REQ      = 0x59,
 
        /* TPT commands */
@@ -165,9 +166,15 @@ enum {
 };
 
 enum {
-       MLX4_CMD_TIME_CLASS_A   = 10000,
-       MLX4_CMD_TIME_CLASS_B   = 10000,
-       MLX4_CMD_TIME_CLASS_C   = 10000,
+       MLX4_CMD_TIME_CLASS_A   = 60000,
+       MLX4_CMD_TIME_CLASS_B   = 60000,
+       MLX4_CMD_TIME_CLASS_C   = 60000,
+};
+
+enum {
+       /* virtual to physical port mapping opcode modifiers */
+       MLX4_GET_PORT_VIRT2PHY = 0x0,
+       MLX4_SET_PORT_VIRT2PHY = 0x1,
 };
 
 enum {
@@ -279,6 +286,8 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
                              struct mlx4_config_dev_params *params);
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev);
+void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev);
 /*
  * mlx4_get_slave_default_vlan -
  * return true if VST ( default vlan)
@@ -288,5 +297,6 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
                                 u16 *vlan, u8 *qos);
 
 #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17)
 
 #endif /* MLX4_CMD_H */
index f1e41b33462fc0f261edf58f67005dfa0605bcb1..c116cb02475c86f3317b7e3cac1c529deb165618 100644 (file)
@@ -70,6 +70,7 @@ enum {
        MLX4_FLAG_SLAVE         = 1 << 3,
        MLX4_FLAG_SRIOV         = 1 << 4,
        MLX4_FLAG_OLD_REG_MAC   = 1 << 6,
+       MLX4_FLAG_BONDED        = 1 << 7
 };
 
 enum {
@@ -97,7 +98,7 @@ enum {
        MLX4_MAX_NUM_PF         = 16,
        MLX4_MAX_NUM_VF         = 126,
        MLX4_MAX_NUM_VF_P_PORT  = 64,
-       MLX4_MFUNC_MAX          = 80,
+       MLX4_MFUNC_MAX          = 128,
        MLX4_MAX_EQ_NUM         = 1024,
        MLX4_MFUNC_EQ_NUM       = 4,
        MLX4_MFUNC_MAX_EQES     = 8,
@@ -200,7 +201,9 @@ enum {
        MLX4_DEV_CAP_FLAG2_CONFIG_DEV           = 1LL <<  16,
        MLX4_DEV_CAP_FLAG2_SYS_EQS              = 1LL <<  17,
        MLX4_DEV_CAP_FLAG2_80_VFS               = 1LL <<  18,
-       MLX4_DEV_CAP_FLAG2_FS_A0                = 1LL <<  19
+       MLX4_DEV_CAP_FLAG2_FS_A0                = 1LL <<  19,
+       MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
+       MLX4_DEV_CAP_FLAG2_PORT_REMAP           = 1LL <<  21
 };
 
 enum {
@@ -208,6 +211,10 @@ enum {
        MLX4_QUERY_FUNC_FLAGS_A0_RES_QP         = 1LL << 1
 };
 
+enum {
+       MLX4_VF_CAP_FLAG_RESET                  = 1 << 0
+};
+
 /* bit enums for an 8-bit flags field indicating special use
  * QPs which require special handling in qp_reserve_range.
  * Currently, this only includes QPs used by the ETH interface,
@@ -248,9 +255,14 @@ enum {
        MLX4_BMME_FLAG_TYPE_2_WIN       = 1 <<  9,
        MLX4_BMME_FLAG_RESERVED_LKEY    = 1 << 10,
        MLX4_BMME_FLAG_FAST_REG_WR      = 1 << 11,
+       MLX4_BMME_FLAG_PORT_REMAP       = 1 << 24,
        MLX4_BMME_FLAG_VSD_INIT2RTR     = 1 << 28,
 };
 
+enum {
+       MLX4_FLAG_PORT_REMAP            = MLX4_BMME_FLAG_PORT_REMAP
+};
+
 enum mlx4_event {
        MLX4_EVENT_TYPE_COMP               = 0x00,
        MLX4_EVENT_TYPE_PATH_MIG           = 0x01,
@@ -276,6 +288,7 @@ enum mlx4_event {
        MLX4_EVENT_TYPE_FATAL_WARNING      = 0x1b,
        MLX4_EVENT_TYPE_FLR_EVENT          = 0x1c,
        MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
+       MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT  = 0x3e,
        MLX4_EVENT_TYPE_NONE               = 0xff,
 };
 
@@ -284,6 +297,11 @@ enum {
        MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4
 };
 
+enum {
+       MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE          = 1,
+       MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE  = 2,
+};
+
 enum {
        MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
 };
@@ -411,6 +429,16 @@ enum {
        MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK        = 1 << 4,
 };
 
+enum {
+       MLX4_DEVICE_STATE_UP                    = 1 << 0,
+       MLX4_DEVICE_STATE_INTERNAL_ERROR        = 1 << 1,
+};
+
+enum {
+       MLX4_INTERFACE_STATE_UP         = 1 << 0,
+       MLX4_INTERFACE_STATE_DELETION   = 1 << 1,
+};
+
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
                             MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
 
@@ -535,6 +563,7 @@ struct mlx4_caps {
        u8                      alloc_res_qp_mask;
        u32                     dmfs_high_rate_qpn_base;
        u32                     dmfs_high_rate_qpn_range;
+       u32                     vf_caps;
 };
 
 struct mlx4_buf_list {
@@ -744,8 +773,23 @@ struct mlx4_vf_dev {
        u8                      n_ports;
 };
 
-struct mlx4_dev {
+struct mlx4_dev_persistent {
        struct pci_dev         *pdev;
+       struct mlx4_dev        *dev;
+       int                     nvfs[MLX4_MAX_PORTS + 1];
+       int                     num_vfs;
+       enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1];
+       enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1];
+       struct work_struct      catas_work;
+       struct workqueue_struct *catas_wq;
+       struct mutex    device_state_mutex; /* protect HW state */
+       u8              state;
+       struct mutex    interface_state_mutex; /* protect SW state */
+       u8      interface_state;
+};
+
+struct mlx4_dev {
+       struct mlx4_dev_persistent *persist;
        unsigned long           flags;
        unsigned long           num_slaves;
        struct mlx4_caps        caps;
@@ -754,13 +798,11 @@ struct mlx4_dev {
        struct radix_tree_root  qp_table_tree;
        u8                      rev_id;
        char                    board_id[MLX4_BOARD_ID_LEN];
-       int                     num_vfs;
        int                     numa_node;
        int                     oper_log_mgm_entry_size;
        u64                     regid_promisc_array[MLX4_MAX_PORTS + 1];
        u64                     regid_allmulti_array[MLX4_MAX_PORTS + 1];
        struct mlx4_vf_dev     *dev_vfs;
-       int                     nvfs[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_eqe {
@@ -832,6 +874,11 @@ struct mlx4_eqe {
                                } __packed tbl_change_info;
                        } params;
                } __packed port_mgmt_change;
+               struct {
+                       u8 reserved[3];
+                       u8 port;
+                       u32 reserved1[5];
+               } __packed bad_cable;
        }                       event;
        u8                      slave_id;
        u8                      reserved3[2];
@@ -1338,6 +1385,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
 int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
 
 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
index 022055c8fb2649456b19197f8417f8011ee2dc16..9553a73d2049e425bc72bf4fbb7c151ffc9bbe43 100644 (file)
@@ -49,6 +49,10 @@ enum mlx4_dev_event {
        MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
 };
 
+enum {
+       MLX4_INTFF_BONDING      = 1 << 0
+};
+
 struct mlx4_interface {
        void *                  (*add)   (struct mlx4_dev *dev);
        void                    (*remove)(struct mlx4_dev *dev, void *context);
@@ -57,11 +61,26 @@ struct mlx4_interface {
        void *                  (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
        struct list_head        list;
        enum mlx4_protocol      protocol;
+       int                     flags;
 };
 
 int mlx4_register_interface(struct mlx4_interface *intf);
 void mlx4_unregister_interface(struct mlx4_interface *intf);
 
+int mlx4_bond(struct mlx4_dev *dev);
+int mlx4_unbond(struct mlx4_dev *dev);
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+       return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+struct mlx4_port_map {
+       u8      port1;
+       u8      port2;
+};
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+
 void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
 
 static inline u64 mlx4_mac_to_u64(u8 *addr)
index 467ccdf94c981f7a7f90b04b6d2855c4b4a56d18..2bbc62aa818a374d1c488f2eecf4232230bd3f4e 100644 (file)
@@ -96,6 +96,7 @@ enum {
        MLX4_QP_BIT_RRE                         = 1 << 15,
        MLX4_QP_BIT_RWE                         = 1 << 14,
        MLX4_QP_BIT_RAE                         = 1 << 13,
+       MLX4_QP_BIT_FPP                         = 1 <<  3,
        MLX4_QP_BIT_RIC                         = 1 <<  4,
 };
 
index f80d0194c9bc2fa67b73eadbf93ac65e62434000..dd5ea3016fc4e854ded6b1e7c2e096224d83317f 100644 (file)
@@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_WRITE 0x0008  /* Special case for get_user_pages */
 #define VM_FAULT_HWPOISON 0x0010       /* Hit poisoned small page */
 #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040
 
 #define VM_FAULT_NOPAGE        0x0100  /* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED        0x0200  /* ->fault locked the returned page */
@@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page)
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
-                        VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+                        VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
+                        VM_FAULT_FALLBACK)
 
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
@@ -1952,7 +1954,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
 #if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
-  #define expand_upwards(vma, address) do { } while (0)
+  #define expand_upwards(vma, address) (0)
 #endif
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
index 375af80bde7d7c90bb1c09efb3edc297dbe4d864..f767a0de611f8a726df957c6595d84df92d8efb3 100644 (file)
@@ -137,6 +137,7 @@ struct sdhci_host {
 #define SDHCI_SDR104_NEEDS_TUNING (1<<10)      /* SDR104/HS200 needs tuning */
 #define SDHCI_USING_RETUNING_TIMER (1<<11)     /* Host is using a retuning timer for the card */
 #define SDHCI_USE_64_BIT_DMA   (1<<12) /* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING     (1<<13) /* Tuning for HS400 */
 
        unsigned int version;   /* SDHCI spec. version */
 
index ebfb0e153c6a78de5cc26162b2f7e904a9890fe1..b653d7c0a05a0abbaf5e1b4759a3f79b398c8b25 100644 (file)
@@ -444,7 +444,7 @@ extern void __module_put_and_exit(struct module *mod, long code)
 #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
 
 #ifdef CONFIG_MODULE_UNLOAD
-unsigned long module_refcount(struct module *mod);
+int module_refcount(struct module *mod);
 void __symbol_put(const char *symbol);
 #define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x))
 void symbol_put_addr(void *addr);
index 7eeb9bbfb816f3afc11662f05e5dde3d297db489..f7556261fe3c54adb52b28789b7cb7b19b280b13 100644 (file)
@@ -26,7 +26,7 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
 void *module_alloc(unsigned long size);
 
 /* Free memory returned from module_alloc. */
-void module_free(struct module *mod, void *module_region);
+void module_memfree(void *module_region);
 
 /*
  * Apply the given relocation to the (simplified) ELF.  Return -error
@@ -82,4 +82,6 @@ int module_finalize(const Elf_Ehdr *hdr,
 /* Any cleanup needed when module leaves. */
 void module_arch_cleanup(struct module *mod);
 
+/* Any cleanup before freeing mod->module_init */
+void module_arch_freeing_init(struct module *mod);
 #endif
index 8e30685affeb3a40b00fffbdbdcb3edbb5b10ead..7d59dc6ab78922cc8c15b84ba95736752f8139e6 100644 (file)
@@ -66,6 +66,7 @@ enum {
        NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
        NETIF_F_HW_L2FW_DOFFLOAD_BIT,   /* Allow L2 Forwarding in Hardware */
        NETIF_F_BUSY_POLL_BIT,          /* Busy poll */
+       NETIF_F_HW_SWITCH_OFFLOAD_BIT,  /* HW switch offload */
 
        /*
         * Add your fresh new feature above and remember to update
@@ -124,6 +125,7 @@ enum {
 #define NETIF_F_HW_VLAN_STAG_TX        __NETIF_F(HW_VLAN_STAG_TX)
 #define NETIF_F_HW_L2FW_DOFFLOAD       __NETIF_F(HW_L2FW_DOFFLOAD)
 #define NETIF_F_BUSY_POLL      __NETIF_F(BUSY_POLL)
+#define NETIF_F_HW_SWITCH_OFFLOAD      __NETIF_F(HW_SWITCH_OFFLOAD)
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
@@ -159,7 +161,9 @@ enum {
  */
 #define NETIF_F_ONE_FOR_ALL    (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
                                 NETIF_F_SG | NETIF_F_HIGHDMA |         \
-                                NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
+                                NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
+                                NETIF_F_HW_SWITCH_OFFLOAD)
+
 /*
  * If one device doesn't support one of these features, then disable it
  * for all in netdev_increment_features.
index 679e6e90aa4c2b1a2e9ea9c3fde52e12ea389b91..d115256ed5a209fe28ce971bdbde7ca421d048aa 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/netdev_features.h>
 #include <linux/neighbour.h>
 #include <uapi/linux/netdevice.h>
+#include <uapi/linux/if_bonding.h>
 
 struct netpoll_info;
 struct device;
@@ -643,39 +644,40 @@ struct rps_dev_flow_table {
 /*
  * The rps_sock_flow_table contains mappings of flows to the last CPU
  * on which they were processed by the application (set in recvmsg).
+ * Each entry is a 32-bit value. The upper part holds the high-order bits
+ * of the flow hash, the lower part the CPU number.
+ * rps_cpu_mask is used to partition the space, depending on the number of
+ * possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
+ * meaning we use 32-6=26 bits for the hash.
  */
 struct rps_sock_flow_table {
-       unsigned int mask;
-       u16 ents[0];
+       u32     mask;
+
+       u32     ents[0] ____cacheline_aligned_in_smp;
 };
-#define        RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
-    ((_num) * sizeof(u16)))
+#define        RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
 
 #define RPS_NO_CPU 0xffff
 
+extern u32 rps_cpu_mask;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+
 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
                                        u32 hash)
 {
        if (table && hash) {
-               unsigned int cpu, index = hash & table->mask;
+               unsigned int index = hash & table->mask;
+               u32 val = hash & ~rps_cpu_mask;
 
                /* We only give a hint, preemption can change cpu under us */
-               cpu = raw_smp_processor_id();
+               val |= raw_smp_processor_id();
 
-               if (table->ents[index] != cpu)
-                       table->ents[index] = cpu;
+               if (table->ents[index] != val)
+                       table->ents[index] = val;
        }
 }
 
-static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
-                                      u32 hash)
-{
-       if (table && hash)
-               table->ents[hash & table->mask] = RPS_NO_CPU;
-}
-
-extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
-
 #ifdef CONFIG_RFS_ACCEL
 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
                         u16 filter_id);
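
The comment above fully determines the entry layout, which is easy to verify
standalone (sketch; only the mask derivation follows the comment, everything
else is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t roundup_pow_of_two(uint32_t n)
    {
            uint32_t p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            uint32_t nr_cpu_ids = 64;
            uint32_t rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1; /* 0x3f */
            uint32_t hash = 0xdeadbeef, cpu = 5;

            /* upper bits: flow hash; lower bits: CPU number */
            uint32_t entry = (hash & ~rps_cpu_mask) | cpu;

            printf("mask=%#x entry=%#x cpu=%u\n",
                   rps_cpu_mask, entry, entry & rps_cpu_mask);
            return 0;
    }
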
@@ -852,11 +854,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     3. Update dev->stats asynchronously and atomically, and define
  *        neither operation.
  *
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *     If the device supports VLAN filtering, this function is called when a
  *     VLAN id is registered.
  *
- * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *     If the device supports VLAN filtering, this function is called when a
  *     VLAN id is unregistered.
  *
@@ -1154,13 +1156,15 @@ struct net_device_ops {
                                                int idx);
 
        int                     (*ndo_bridge_setlink)(struct net_device *dev,
-                                                     struct nlmsghdr *nlh);
+                                                     struct nlmsghdr *nlh,
+                                                     u16 flags);
        int                     (*ndo_bridge_getlink)(struct sk_buff *skb,
                                                      u32 pid, u32 seq,
                                                      struct net_device *dev,
                                                      u32 filter_mask);
        int                     (*ndo_bridge_dellink)(struct net_device *dev,
-                                                     struct nlmsghdr *nlh);
+                                                     struct nlmsghdr *nlh,
+                                                     u16 flags);
        int                     (*ndo_change_carrier)(struct net_device *dev,
                                                      bool new_carrier);
        int                     (*ndo_get_phys_port_id)(struct net_device *dev,
@@ -1514,6 +1518,8 @@ struct net_device {
        struct list_head        napi_list;
        struct list_head        unreg_list;
        struct list_head        close_list;
+       struct list_head        ptype_all;
+       struct list_head        ptype_specific;
 
        struct {
                struct list_head upper;
@@ -1969,7 +1975,7 @@ struct offload_callbacks {
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
                                                netdev_features_t features);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
-                                              struct sk_buff *skb);
+                                                struct sk_buff *skb);
        int                     (*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
@@ -1979,10 +1985,21 @@ struct packet_offload {
        struct list_head         list;
 };
 
+struct udp_offload;
+
+struct udp_offload_callbacks {
+       struct sk_buff          **(*gro_receive)(struct sk_buff **head,
+                                                struct sk_buff *skb,
+                                                struct udp_offload *uoff);
+       int                     (*gro_complete)(struct sk_buff *skb,
+                                               int nhoff,
+                                               struct udp_offload *uoff);
+};
+
 struct udp_offload {
        __be16                   port;
        u8                       ipproto;
-       struct offload_callbacks callbacks;
+       struct udp_offload_callbacks callbacks;
 };
 
 /* often modified stats are per cpu, others are shared (netdev->stats) */
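
A tunnel driver registers one udp_offload per listening port; a hedged sketch
of how the new callbacks are wired up (names and bodies are placeholders, and
registration is assumed to go through udp_add_offload() as with the old
callback layout):

    static struct sk_buff **my_gro_receive(struct sk_buff **head,
                                           struct sk_buff *skb,
                                           struct udp_offload *uoff)
    {
            /* uoff identifies which registered port matched */
            return NULL;    /* placeholder: flush, no aggregation */
    }

    static int my_gro_complete(struct sk_buff *skb, int nhoff,
                               struct udp_offload *uoff)
    {
            return 0;       /* placeholder */
    }

    static struct udp_offload my_udp_offload = {
            .port      = 0, /* set to htons(<tunnel port>) before registering */
            .callbacks = {
                    .gro_receive  = my_gro_receive,
                    .gro_complete = my_gro_complete,
            },
    };
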
@@ -2041,6 +2058,7 @@ struct pcpu_sw_netstats {
 #define NETDEV_RESEND_IGMP     0x0016
 #define NETDEV_PRECHANGEMTU    0x0017 /* notify before mtu change happened */
 #define NETDEV_CHANGEINFODATA  0x0018
+#define NETDEV_BONDING_INFO    0x0019
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2085,7 +2103,7 @@ extern rwlock_t                           dev_base_lock;          /* Device list lock */
        list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_in_bond_rcu(bond, slave)       \
                for_each_netdev_rcu(&init_net, slave)   \
-                       if (netdev_master_upper_dev_get_rcu(slave) == bond)
+                       if (netdev_master_upper_dev_get_rcu(slave) == (bond))
 #define net_device_entry(lh)   list_entry(lh, struct net_device, dev_list)
 
 static inline struct net_device *next_net_device(struct net_device *dev)
@@ -2303,6 +2321,21 @@ do {                                                                     \
                                           compute_pseudo(skb, proto)); \
 } while (0)
 
+static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+                                          int start, int offset)
+{
+       __wsum delta;
+
+       BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+       delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+
+       /* Adjust skb->csum since we changed the packet */
+       skb->csum = csum_add(skb->csum, delta);
+       NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+}
+
+
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
@@ -3464,6 +3497,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features);
 
+struct netdev_bonding_info {
+       ifslave slave;
+       ifbond  master;
+};
+
+struct netdev_notifier_bonding_info {
+       struct netdev_notifier_info info; /* must be first */
+       struct netdev_bonding_info  bonding_info;
+};
+
+void netdev_bonding_info_change(struct net_device *dev,
+                               struct netdev_bonding_info *bonding_info);
+
 static inline
 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
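
Drivers consume these events from their netdev notifier; a minimal hedged
sketch (handler name hypothetical; the cast works because info must be the
structure's first member, as the comment above notes):

    static int my_netdev_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            if (event == NETDEV_BONDING_INFO) {
                    struct netdev_notifier_bonding_info *nbi = ptr;

                    pr_info("%s: slave state %d\n", dev->name,
                            nbi->bonding_info.slave.state);
            }
            return NOTIFY_DONE;
    }
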
index 1e37fbb78f7afbc57b8ab3c076d66ce7555f3cb0..ddea982355f3be93947dae0b2e30f9e00209920e 100644 (file)
@@ -74,6 +74,9 @@ struct nfs_client {
        /* idmapper */
        struct idmap *          cl_idmap;
 
+       /* Client owner identifier */
+       const char *            cl_owner_id;
+
        /* Our own IP address, as a null-terminated string.
         * This is used to generate the mv0 callback address.
         */
index 853698c721f7d1547df181a4fbfc904a67758f72..76200984d1e22081954234d49f32653e18478717 100644 (file)
@@ -85,11 +85,6 @@ static inline void oom_killer_enable(void)
        oom_killer_disabled = false;
 }
 
-static inline bool oom_gfp_allowed(gfp_t gfp_mask)
-{
-       return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
-}
-
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
 static inline bool task_will_free_mem(struct task_struct *task)
index 360a966a97a5807f7ff13441748c0c93850ff7e1..9603094ed59b2adb5defa9eb93a955c5ce93e543 100644 (file)
@@ -175,6 +175,8 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
        /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
        PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
+       /* Do not use bus resets for device */
+       PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
 };
 
 enum pci_irq_reroute_variant {
@@ -1065,6 +1067,7 @@ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
 void pci_bus_assign_resources(const struct pci_bus *bus);
 void pci_bus_size_bridges(struct pci_bus *bus);
 int pci_claim_resource(struct pci_dev *, int);
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
 void pci_assign_unassigned_resources(void);
 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
index 486e84ccb1f92545ec0d4f74aaa153abf0ff8049..664de5a4ec4672d9bbd9d1059a2c7005591329b3 100644 (file)
@@ -79,11 +79,6 @@ struct perf_branch_stack {
        struct perf_branch_entry        entries[0];
 };
 
-struct perf_regs {
-       __u64           abi;
-       struct pt_regs  *regs;
-};
-
 struct task_struct;
 
 /*
@@ -455,11 +450,6 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
-enum perf_event_context_type {
-       task_context,
-       cpu_context,
-};
-
 /**
  * struct perf_event_context - event context structure
  *
@@ -467,7 +457,6 @@ enum perf_event_context_type {
  */
 struct perf_event_context {
        struct pmu                      *pmu;
-       enum perf_event_context_type    type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
@@ -610,7 +599,14 @@ struct perf_sample_data {
                u32     reserved;
        }                               cpu_entry;
        struct perf_callchain_entry     *callchain;
+
+       /*
+        * regs_user may point to task_pt_regs or to regs_user_copy, depending
+        * on arch details.
+        */
        struct perf_regs                regs_user;
+       struct pt_regs                  regs_user_copy;
+
        struct perf_regs                regs_intr;
        u64                             stack_user_size;
 } ____cacheline_aligned;
index 3c73d5fe18be4b950628f82234b7ba58855b2c29..a5f98d53d7325b0358bd45b7b7406b4f02fef6d5 100644 (file)
@@ -1,11 +1,19 @@
 #ifndef _LINUX_PERF_REGS_H
 #define _LINUX_PERF_REGS_H
 
+struct perf_regs {
+       __u64           abi;
+       struct pt_regs  *regs;
+};
+
 #ifdef CONFIG_HAVE_PERF_REGS
 #include <asm/perf_regs.h>
 u64 perf_reg_value(struct pt_regs *regs, int idx);
 int perf_reg_validate(u64 mask);
 u64 perf_reg_abi(struct task_struct *task);
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy);
 #else
 static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
 {
        return PERF_SAMPLE_REGS_ABI_NONE;
 }
+
+static inline void perf_get_regs_user(struct perf_regs *regs_user,
+                                     struct pt_regs *regs,
+                                     struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
 #endif /* CONFIG_HAVE_PERF_REGS */
 #endif /* _LINUX_PERF_REGS_H */
index 22af8f8f5802bfb7e82b8c837d994d52a87a2b08..685809835b5c0edc5b80a3398e849c3a53d5b17c 100644 (file)
@@ -327,6 +327,8 @@ struct phy_c45_device_ids {
 * c45_ids: 802.3-c45 Device Identifiers if is_c45.
  * is_c45:  Set to true if this phy uses clause 45 addressing.
  * is_internal: Set to true if this phy is internal to a MAC.
+ * has_fixups: Set to true if this phy has fixups/quirks.
+ * suspended: Set to true if this phy has been suspended successfully.
  * state: state of the PHY for management purposes
  * dev_flags: Device-specific flags used by the PHY driver.
  * addr: Bus address of PHY
@@ -364,6 +366,7 @@ struct phy_device {
        bool is_c45;
        bool is_internal;
        bool has_fixups;
+       bool suspended;
 
        enum phy_state state;
 
@@ -565,6 +568,15 @@ struct phy_driver {
        void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
                                   int devnum, int regnum, u32 val);
 
+       /* Get the size and type of the eeprom contained within a plug-in
+        * module */
+       int (*module_info)(struct phy_device *dev,
+                          struct ethtool_modinfo *modinfo);
+
+       /* Get the eeprom information from the plug-in module */
+       int (*module_eeprom)(struct phy_device *dev,
+                            struct ethtool_eeprom *ee, u8 *data);
+
        struct device_driver driver;
 };
 #define to_phy_driver(d) container_of(d, struct phy_driver, driver)
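
A hypothetical PHY driver would implement the pair much like the corresponding
ethtool ops (sketch; the SFF-8079 module type and the zero-filled EEPROM are
placeholders):

    static int my_phy_module_info(struct phy_device *dev,
                                  struct ethtool_modinfo *modinfo)
    {
            modinfo->type = ETH_MODULE_SFF_8079;            /* placeholder */
            modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
            return 0;
    }

    static int my_phy_module_eeprom(struct phy_device *dev,
                                    struct ethtool_eeprom *ee, u8 *data)
    {
            memset(data, 0, ee->len);       /* placeholder for a real read */
            return 0;
    }

    static struct phy_driver my_phy_driver = {
            /* ...usual probe/config fields... */
            .module_info   = my_phy_module_info,
            .module_eeprom = my_phy_module_eeprom,
    };
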
index e9e6cfbfbb589d0393060e2fed0422ec402dd612..eb7d4a135a9ea71364105c0bade762b5f06b67da 100644 (file)
@@ -66,7 +66,7 @@ enum omap_control_usb_mode {
 #define        OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF      0x0
 
 #define        OMAP_CTRL_PCIE_PCS_MASK                 0xff
-#define        OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT    0x8
+#define        OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT    16
 
 #define OMAP_CTRL_USB2_PHY_PD          BIT(28)
 
@@ -79,7 +79,7 @@ enum omap_control_usb_mode {
 void omap_control_phy_power(struct device *dev, int on);
 void omap_control_usb_set_mode(struct device *dev,
                               enum omap_control_usb_mode mode);
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay);
+void omap_control_pcie_pcs(struct device *dev, u8 delay);
 #else
 
 static inline void omap_control_phy_power(struct device *dev, int on)
@@ -91,7 +91,7 @@ static inline void omap_control_usb_set_mode(struct device *dev,
 {
 }
 
-static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+static inline void omap_control_pcie_pcs(struct device *dev, u8 delay)
 {
 }
 #endif
index 5087fff96d86a53d04443235aaa9351e5cc6b5ba..cc2bdafb0c69cfe61d1ff99f69423b87d2322eee 100644 (file)
@@ -26,6 +26,8 @@
 struct st21nfca_nfc_platform_data {
        unsigned int gpio_ena;
        unsigned int irq_polarity;
+       bool is_ese_present;
+       bool is_uicc_present;
 };
 
 #endif /* _ST21NFCA_HCI_H_ */
index c3b432f5b63e7eea706738a951d64e2556033ee4..b023373d987419652563e3d0a06391b1b3672ab9 100644 (file)
@@ -19,8 +19,6 @@
 #ifndef _ST21NFCB_NCI_H_
 #define _ST21NFCB_NCI_H_
 
-#include <linux/i2c.h>
-
 #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
 
 struct st21nfcb_nfc_platform_data {
@@ -28,4 +26,4 @@ struct st21nfcb_nfc_platform_data {
        unsigned int irq_polarity;
 };
 
-#endif /* _ST21NFCA_HCI_H_ */
+#endif /* _ST21NFCB_NCI_H_ */
index c8f170324e643ccb895dacfd9b06a66a8afa7193..4d5bf5726578c58b739a79a5f093e5f7c4a009a3 100644 (file)
@@ -10,9 +10,6 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
-extern char *log_buf_addr_get(void);
-extern u32 log_buf_len_get(void);
-
 static inline int printk_get_level(const char *buffer)
 {
        if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
@@ -163,6 +160,8 @@ extern int kptr_restrict;
 
 extern void wake_up_klogd(void);
 
+char *log_buf_addr_get(void);
+u32 log_buf_len_get(void);
 void log_buf_kexec_setup(void);
 void __init setup_log_buf(int early);
 void dump_stack_set_arch_desc(const char *fmt, ...);
@@ -198,6 +197,16 @@ static inline void wake_up_klogd(void)
 {
 }
 
+static inline char *log_buf_addr_get(void)
+{
+       return NULL;
+}
+
+static inline u32 log_buf_len_get(void)
+{
+       return 0;
+}
+
 static inline void log_buf_kexec_setup(void)
 {
 }
index 50978b781a19c4d82c60c44918f5049c4cd25272..097d7eb2441e529f139822b6bf4fe5031dc5e710 100644 (file)
@@ -321,6 +321,49 @@ struct dquot_operations {
 
 struct path;
 
+/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
+struct qc_dqblk {
+       int d_fieldmask;        /* mask of fields to change in ->set_dqblk() */
+       u64 d_spc_hardlimit;    /* absolute limit on used space */
+       u64 d_spc_softlimit;    /* preferred limit on used space */
+       u64 d_ino_hardlimit;    /* maximum # allocated inodes */
+       u64 d_ino_softlimit;    /* preferred inode limit */
+       u64 d_space;            /* Space owned by the user */
+       u64 d_ino_count;        /* # inodes owned by the user */
+       s64 d_ino_timer;        /* zero if within inode limits */
+                               /* if not, we refuse service */
+       s64 d_spc_timer;        /* similar to above; for space */
+       int d_ino_warns;        /* # warnings issued wrt num inodes */
+       int d_spc_warns;        /* # warnings issued wrt used space */
+       u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
+       u64 d_rt_spc_softlimit; /* preferred limit on RT space */
+       u64 d_rt_space;         /* realtime space owned */
+       s64 d_rt_spc_timer;     /* similar to above; for RT space */
+       int d_rt_spc_warns;     /* # warnings issued wrt RT space */
+};
+
+/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
+#define        QC_INO_SOFT     (1<<0)
+#define        QC_INO_HARD     (1<<1)
+#define        QC_SPC_SOFT     (1<<2)
+#define        QC_SPC_HARD     (1<<3)
+#define        QC_RT_SPC_SOFT  (1<<4)
+#define        QC_RT_SPC_HARD  (1<<5)
+#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
+                      QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
+#define        QC_SPC_TIMER    (1<<6)
+#define        QC_INO_TIMER    (1<<7)
+#define        QC_RT_SPC_TIMER (1<<8)
+#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
+#define        QC_SPC_WARNS    (1<<9)
+#define        QC_INO_WARNS    (1<<10)
+#define        QC_RT_SPC_WARNS (1<<11)
+#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
+#define        QC_SPACE        (1<<12)
+#define        QC_INO_COUNT    (1<<13)
+#define        QC_RT_SPACE     (1<<14)
+#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+
 /* Operations handling requests from userspace */
 struct quotactl_ops {
        int (*quota_on)(struct super_block *, int, int, struct path *);
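
d_fieldmask selects which fields ->set_dqblk() should apply; changing only the
space limits, for instance, looks roughly like this (sketch; units follow the
field comments above):

    struct qc_dqblk dq = {
            .d_fieldmask     = QC_SPC_SOFT | QC_SPC_HARD,
            .d_spc_softlimit = 1ULL << 30,  /* 1 GiB preferred limit */
            .d_spc_hardlimit = 2ULL << 30,  /* 2 GiB absolute limit */
    };

    /* sb->s_qcop->set_dqblk(sb, qid, &dq); unmasked fields are ignored */
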
@@ -329,8 +372,8 @@ struct quotactl_ops {
        int (*quota_sync)(struct super_block *, int);
        int (*get_info)(struct super_block *, int, struct if_dqinfo *);
        int (*set_info)(struct super_block *, int, struct if_dqinfo *);
-       int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
-       int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
+       int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+       int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
        int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
        int (*set_xstate)(struct super_block *, unsigned int, int);
        int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
index f23538a6e411f4e1d5700ebbed96480f1425a9e3..29e3455f7d41f7f951e95d544acefd970388e767 100644 (file)
@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
 int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
 int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
 int dquot_get_dqblk(struct super_block *sb, struct kqid id,
-               struct fs_disk_quota *di);
+               struct qc_dqblk *di);
 int dquot_set_dqblk(struct super_block *sb, struct kqid id,
-               struct fs_disk_quota *di);
+               struct qc_dqblk *di);
 
 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
 int dquot_transfer(struct inode *inode, struct iattr *iattr);
index b93fd89b2e5e086e23f054d33111c042317f5dbd..58851275fed98c352fdd4995e95f1ebe806649e7 100644 (file)
 #ifndef _LINUX_RHASHTABLE_H
 #define _LINUX_RHASHTABLE_H
 
-#include <linux/rculist.h>
+#include <linux/compiler.h>
+#include <linux/list_nulls.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+/*
+ * The end of the chain is marked with a special nulls marks which has
+ * the following format:
+ *
+ * +-------+-----------------------------------------------------+-+
+ * | Base  |                      Hash                           |1|
+ * +-------+-----------------------------------------------------+-+
+ *
+ * Base (4 bits) : Reserved to distinguish between multiple tables.
+ *                 Specified via &struct rhashtable_params.nulls_base.
+ * Hash (27 bits): Full hash (unmasked) of first element added to bucket
+ * 1 (1 bit)     : Nulls marker (always set)
+ *
+ * The remaining bits of the next pointer remain unused for now.
+ */
+#define RHT_BASE_BITS          4
+#define RHT_HASH_BITS          27
+#define RHT_BASE_SHIFT         RHT_HASH_BITS
 
 struct rhash_head {
        struct rhash_head __rcu         *next;
 };
 
-#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL)
-
+/**
+ * struct bucket_table - Table of hash buckets
+ * @size: Number of hash buckets
+ * @locks_mask: Mask to apply before accessing locks[]
+ * @locks: Array of spinlocks protecting individual buckets
+ * @buckets: size * hash buckets
+ */
 struct bucket_table {
        size_t                          size;
+       unsigned int                    locks_mask;
+       spinlock_t                      *locks;
        struct rhash_head __rcu         *buckets[];
 };
 
@@ -45,11 +74,16 @@ struct rhashtable;
  * @hash_rnd: Seed to use while hashing
  * @max_shift: Maximum number of shifts while expanding
  * @min_shift: Minimum number of shifts while shrinking
+ * @nulls_base: Base value to generate nulls marker
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
  * @hashfn: Function to hash key
  * @obj_hashfn: Function to hash object
  * @grow_decision: If defined, may return true if table should expand
  * @shrink_decision: If defined, may return true if table should shrink
- * @mutex_is_held: Must return true if protecting mutex is held
+ *
+ * Note: when implementing the grow and shrink decision functions, min/max
+ * shift must be enforced, otherwise the resizing watermarks they set may
+ * be rendered useless.
  */
 struct rhashtable_params {
        size_t                  nelem_hint;
@@ -59,36 +93,95 @@ struct rhashtable_params {
        u32                     hash_rnd;
        size_t                  max_shift;
        size_t                  min_shift;
+       u32                     nulls_base;
+       size_t                  locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
        bool                    (*grow_decision)(const struct rhashtable *ht,
                                                 size_t new_size);
        bool                    (*shrink_decision)(const struct rhashtable *ht,
                                                   size_t new_size);
-#ifdef CONFIG_PROVE_LOCKING
-       int                     (*mutex_is_held)(void *parent);
-       void                    *parent;
-#endif
 };
 
 /**
  * struct rhashtable - Hash table handle
  * @tbl: Bucket table
+ * @future_tbl: Table under construction during expansion/shrinking
  * @nelems: Number of elements in table
  * @shift: Current size (1 << shift)
  * @p: Configuration parameters
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @walkers: List of active walkers
+ * @being_destroyed: True if table is set up for destruction
  */
 struct rhashtable {
        struct bucket_table __rcu       *tbl;
-       size_t                          nelems;
-       size_t                          shift;
+       struct bucket_table __rcu       *future_tbl;
+       atomic_t                        nelems;
+       atomic_t                        shift;
        struct rhashtable_params        p;
+       struct work_struct              run_work;
+       struct mutex                    mutex;
+       struct list_head                walkers;
+       bool                            being_destroyed;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @resize: Resize event occurred
+ */
+struct rhashtable_walker {
+       struct list_head list;
+       bool resize;
 };
 
+/**
+ * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+       struct rhashtable *ht;
+       struct rhash_head *p;
+       struct rhashtable_walker *walker;
+       unsigned int slot;
+       unsigned int skip;
+};
+
+static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
+{
+       return NULLS_MARKER(ht->p.nulls_base + hash);
+}
+
+#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
+       ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+
+static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
+{
+       return ((unsigned long) ptr & 1);
+}
+
+static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
+{
+       return ((unsigned long) ptr) >> 1;
+}
+
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht);
+int lockdep_rht_mutex_is_held(struct rhashtable *ht);
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
 #else
-static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
+{
+       return 1;
+}
+
+static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
+                                            u32 hash)
 {
        return 1;
 }
@@ -96,13 +189,8 @@ static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
 
 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
 
-u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
-u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
-
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
-void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
-                            struct rhash_head __rcu **pprev);
 
 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
@@ -110,11 +198,23 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
 int rhashtable_expand(struct rhashtable *ht);
 int rhashtable_shrink(struct rhashtable *ht);
 
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+void *rhashtable_lookup(struct rhashtable *ht, const void *key);
+void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
                                bool (*compare)(void *, void *), void *arg);
 
-void rhashtable_destroy(const struct rhashtable *ht);
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
+bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
+                                     struct rhash_head *obj,
+                                     bool (*compare)(void *, void *),
+                                     void *arg);
+
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+void rhashtable_walk_exit(struct rhashtable_iter *iter);
+int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+
+void rhashtable_destroy(struct rhashtable *ht);
 
 #define rht_dereference(p, ht) \
        rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -122,92 +222,146 @@ void rhashtable_destroy(const struct rhashtable *ht);
 #define rht_dereference_rcu(p, ht) \
        rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
 
-#define rht_entry(ptr, type, member) container_of(ptr, type, member)
-#define rht_entry_safe(ptr, type, member) \
-({ \
-       typeof(ptr) __ptr = (ptr); \
-          __ptr ? rht_entry(__ptr, type, member) : NULL; \
-})
+#define rht_dereference_bucket(p, tbl, hash) \
+       rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
+
+#define rht_dereference_bucket_rcu(p, tbl, hash) \
+       rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
 
-#define rht_next_entry_safe(pos, ht, member) \
-({ \
-       pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \
-                            typeof(*(pos)), member) : NULL; \
-})
+#define rht_entry(tpos, pos, member) \
+       ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
+
+/**
+ * rht_for_each_continue - continue iterating over hash chain
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @head:      the previous &struct rhash_head to continue from
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ */
+#define rht_for_each_continue(pos, head, tbl, hash) \
+       for (pos = rht_dereference_bucket(head, tbl, hash); \
+            !rht_is_a_nulls(pos); \
+            pos = rht_dereference_bucket((pos)->next, tbl, hash))
 
 /**
  * rht_for_each - iterate over hash chain
- * @pos:       &struct rhash_head to use as a loop cursor.
- * @head:      head of the hash chain (struct rhash_head *)
- * @ht:                pointer to your struct rhashtable
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
  */
-#define rht_for_each(pos, head, ht) \
-       for (pos = rht_dereference(head, ht); \
-            pos; \
-            pos = rht_dereference((pos)->next, ht))
+#define rht_for_each(pos, tbl, hash) \
+       rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_continue - continue iterating over hash chain
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @head:      the previous &struct rhash_head to continue from
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
+ */
+#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member)        \
+       for (pos = rht_dereference_bucket(head, tbl, hash);             \
+            (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);    \
+            pos = rht_dereference_bucket((pos)->next, tbl, hash))
 
 /**
  * rht_for_each_entry - iterate over hash chain of given type
- * @pos:       type * to use as a loop cursor.
- * @head:      head of the hash chain (struct rhash_head *)
- * @ht:                pointer to your struct rhashtable
- * @member:    name of the rhash_head within the hashable struct.
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
  */
-#define rht_for_each_entry(pos, head, ht, member) \
-       for (pos = rht_entry_safe(rht_dereference(head, ht), \
-                                  typeof(*(pos)), member); \
-            pos; \
-            pos = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry(tpos, pos, tbl, hash, member)               \
+       rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],    \
+                                   tbl, hash, member)
 
 /**
  * rht_for_each_entry_safe - safely iterate over hash chain of given type
- * @pos:       type * to use as a loop cursor.
- * @n:         type * to use for temporary next object storage
- * @head:      head of the hash chain (struct rhash_head *)
- * @ht:                pointer to your struct rhashtable
- * @member:    name of the rhash_head within the hashable struct.
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @next:      the &struct rhash_head to use as next in loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
  *
  * This hash chain list-traversal primitive allows for the looped code to
  * remove the loop cursor from the list.
  */
-#define rht_for_each_entry_safe(pos, n, head, ht, member)              \
-       for (pos = rht_entry_safe(rht_dereference(head, ht), \
-                                 typeof(*(pos)), member), \
-            n = rht_next_entry_safe(pos, ht, member); \
-            pos; \
-            pos = n, \
-            n = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)        \
+       for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
+            next = !rht_is_a_nulls(pos) ?                                  \
+                      rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+            (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
+            pos = next,                                                    \
+            next = !rht_is_a_nulls(pos) ?                                  \
+                      rht_dereference_bucket(pos->next, tbl, hash) : NULL)
+
+/**
+ * rht_for_each_rcu_continue - continue iterating over rcu hash chain
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @head:      the previous &struct rhash_head to continue from
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu_continue(pos, head, tbl, hash)                        \
+       for (({barrier(); }),                                           \
+            pos = rht_dereference_bucket_rcu(head, tbl, hash);         \
+            !rht_is_a_nulls(pos);                                      \
+            pos = rcu_dereference_raw(pos->next))
 
 /**
  * rht_for_each_rcu - iterate over rcu hash chain
- * @pos:       &struct rhash_head to use as a loop cursor.
- * @head:      head of the hash chain (struct rhash_head *)
- * @ht:                pointer to your struct rhashtable
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu(pos, tbl, hash)                               \
+       rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @head:      the previous &struct rhash_head to continue from
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
  *
  * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_rcu(pos, head, ht) \
-       for (pos = rht_dereference_rcu(head, ht); \
-            pos; \
-            pos = rht_dereference_rcu((pos)->next, ht))
+#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
+       for (({barrier(); }),                                               \
+            pos = rht_dereference_bucket_rcu(head, tbl, hash);             \
+            (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
+            pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
 
 /**
  * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
- * @pos:       type * to use as a loop cursor.
- * @head:      head of the hash chain (struct rhash_head *)
- * @member:    name of the rhash_head within the hashable struct.
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rhash_head to use as a loop cursor.
+ * @tbl:       the &struct bucket_table
+ * @hash:      the hash value / bucket index
+ * @member:    name of the &struct rhash_head within the hashable struct.
  *
  * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_entry_rcu(pos, head, member) \
-       for (pos = rht_entry_safe(rcu_dereference_raw(head), \
-                                 typeof(*(pos)), member); \
-            pos; \
-            pos = rht_entry_safe(rcu_dereference_raw((pos)->member.next), \
-                                 typeof(*(pos)), member))
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)           \
+       rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+                                       tbl, hash, member)
 
 #endif /* _LINUX_RHASHTABLE_H */
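
Putting the new walker API together, iterating a table looks roughly like this
(hedged sketch; error handling trimmed, and rhashtable_walk_next() is assumed
to signal a concurrent resize with ERR_PTR(-EAGAIN)):

    struct rhashtable_iter iter;
    void *obj;

    if (rhashtable_walk_init(&my_ht, &iter))  /* my_ht: your table */
            return;

    rhashtable_walk_start(&iter);             /* takes rcu_read_lock() */
    while ((obj = rhashtable_walk_next(&iter)) != NULL) {
            if (IS_ERR(obj))                  /* resize seen mid-walk */
                    continue;
            /* inspect obj */
    }
    rhashtable_walk_stop(&iter);              /* drops rcu_read_lock() */
    rhashtable_walk_exit(&iter);
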
index c0c2bce6b0b7bab50f5c7bb5a1353d8991f8e08b..d9d7e7e56352a8855def8f86af24fdb569ee480c 100644 (file)
@@ -36,6 +36,16 @@ struct anon_vma {
         */
        atomic_t refcount;
 
+       /*
+        * Count of child anon_vmas and VMAs which point to this anon_vma.
+        *
+        * This counter is used when deciding whether to reuse an anon_vma
+        * instead of forking a new one. See the comments in anon_vma_clone().
+        */
+       unsigned degree;
+
+       struct anon_vma *parent;        /* Parent of this anon_vma */
+
        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
index 85ab7d72b54c2f269812015b19544674bc6dcd72..1bb36edb66b96572bd361c392715ee2d166a2b6b 100644 (file)
@@ -626,8 +626,11 @@ struct sk_buff {
        __u32                   hash;
        __be16                  vlan_proto;
        __u16                   vlan_tci;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       unsigned int    napi_id;
+#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
+       union {
+               unsigned int    napi_id;
+               unsigned int    sender_cpu;
+       };
 #endif
 #ifdef CONFIG_NETWORK_SECMARK
        __u32                   secmark;
@@ -2484,19 +2487,18 @@ static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
 }
 
 static inline int skb_add_data(struct sk_buff *skb,
-                              char __user *from, int copy)
+                              struct iov_iter *from, int copy)
 {
        const int off = skb->len;
 
        if (skb->ip_summed == CHECKSUM_NONE) {
-               int err = 0;
-               __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
-                                                           copy, 0, &err);
-               if (!err) {
+               __wsum csum = 0;
+               if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
+                                           &csum, from) == copy) {
                        skb->csum = csum_block_add(skb->csum, csum, off);
                        return 0;
                }
-       } else if (!copy_from_user(skb_put(skb, copy), from, copy))
+       } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
                return 0;
 
        __skb_trim(skb, off);
@@ -2693,8 +2695,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
 
 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
 {
-       /* XXX: stripping const */
-       return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
+       return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
 }
 
 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
@@ -3071,7 +3072,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
 
 #define skb_checksum_validate_zero_check(skb, proto, check,            \
                                         compute_pseudo)                \
-       __skb_checksum_validate_(skb, proto, true, true, check, compute_pseudo)
+       __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
 
 #define skb_checksum_simple_validate(skb)                              \
        __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
@@ -3096,6 +3097,27 @@ do {                                                                     \
                                       compute_pseudo(skb, proto));     \
 } while (0)
 
+/* Update the skb and packet to reflect the remote checksum offload operation.
+ * When called, ptr indicates the starting point for skb->csum when
+ * ip_summed is CHECKSUM_COMPLETE. If we need to create a complete checksum
+ * here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
+ */
+static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
+                                      int start, int offset)
+{
+       __wsum delta;
+
+       if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+               __skb_checksum_complete(skb);
+               skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
+       }
+
+       delta = remcsum_adjust(ptr, skb->csum, start, offset);
+
+       /* Adjust skb->csum since we changed the packet */
+       skb->csum = csum_add(skb->csum, delta);
+}
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
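
remcsum_adjust() returns a 1's-complement delta that both remcsum variants
fold in with csum_add(); the folding itself is plain checksum arithmetic and
can be verified standalone (the delta value below is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* 1's-complement add with end-around carry, as csum_add() does */
    static uint32_t csum_add(uint32_t csum, uint32_t addend)
    {
            uint32_t res = csum + addend;

            return res + (res < addend);
    }

    int main(void)
    {
            uint32_t skb_csum = 0x1234abcd;
            uint32_t delta    = 0x00010002; /* as if from remcsum_adjust() */

            printf("adjusted csum = %#x\n", csum_add(skb_csum, delta));
            return 0;
    }
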
index 6e49a14365dc1bea4bc442097dcaebb7bb0c08a9..5c19cba34dce023a49c45d776751190834b88889 100644 (file)
@@ -318,13 +318,6 @@ struct ucred {
 /* IPX options */
 #define IPX_TYPE       1
 
-extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 
-                                         struct iovec *iov, 
-                                         int offset, 
-                                         unsigned int len, __wsum *csump);
-extern unsigned long iov_pages(const struct iovec *iov, int offset,
-                              unsigned long nr_segs);
-
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
 
index 262ba4ef9a8ebacb24e7f7326a05bbcb2f666c69..3e18379dfa6f349ba48edfc6615232af41ccfb99 100644 (file)
@@ -190,6 +190,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)
+# define raw_spin_lock_bh_nested(lock, subclass) \
+       _raw_spin_lock_bh_nested(lock, subclass)
 
 # define raw_spin_lock_nest_lock(lock, nest_lock)                      \
         do {                                                           \
@@ -205,6 +207,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 # define raw_spin_lock_nested(lock, subclass)          \
        _raw_spin_lock(((void)(subclass), (lock)))
 # define raw_spin_lock_nest_lock(lock, nest_lock)      _raw_spin_lock(lock)
+# define raw_spin_lock_bh_nested(lock, subclass)       _raw_spin_lock_bh(lock)
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -324,6 +327,11 @@ do {                                                               \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
 } while (0)
 
+#define spin_lock_bh_nested(lock, subclass)                    \
+do {                                                           \
+       raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
+} while (0)
+
 #define spin_lock_nest_lock(lock, nest_lock)                           \
 do {                                                                   \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
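
Usage mirrors spin_lock_nested(): the subclass tells lockdep that taking two
locks of the same class in order is intentional (sketch; lock names are
hypothetical):

    spin_lock_bh(&a->lock);
    spin_lock_bh_nested(&b->lock, SINGLE_DEPTH_NESTING);
    /* ... work on both objects ... */
    spin_unlock_bh(&b->lock);
    spin_unlock_bh(&a->lock);
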
index 42dfab89e740aeb08de1896e491607789eacda4b..5344268e6e62fe7db66e2e305829309e968c4d2f 100644 (file)
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)           __acquires(lock);
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                                __acquires(lock);
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+                                                               __acquires(lock);
 void __lockfunc
 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                                __acquires(lock);
index d0d188861ad69a1b574ea6ce96fb8dfaa6a59c3d..d3afef9d8dbe705b8270f653056e2f684441c7b2 100644 (file)
@@ -57,6 +57,7 @@
 
 #define _raw_spin_lock(lock)                   __LOCK(lock)
 #define _raw_spin_lock_nested(lock, subclass)  __LOCK(lock)
+#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
 #define _raw_read_lock(lock)                   __LOCK(lock)
 #define _raw_write_lock(lock)                  __LOCK(lock)
 #define _raw_spin_lock_bh(lock)                        __LOCK_BH(lock)
index 67309ece0772b9a28e054af553c9aabd76021699..1a7adb411647436feac207029d8e8efe19ac1193 100644 (file)
@@ -115,6 +115,7 @@ struct tcp_request_sock {
        u32                             rcv_isn;
        u32                             snt_isn;
        u32                             snt_synack; /* synack sent time */
+       u32                             last_oow_ack_time; /* last SYNACK */
        u32                             rcv_nxt; /* the ack # by SYNACK. For
                                                  * FastOpen it's the seq#
                                                  * after data-in-SYN.
@@ -152,6 +153,7 @@ struct tcp_sock {
        u32     snd_sml;        /* Last byte of the most recently transmitted small packet */
        u32     rcv_tstamp;     /* timestamp of last received ACK (for keepalives) */
        u32     lsndtime;       /* timestamp of last sent data packet (for restart window) */
+       u32     last_oow_ack_time;  /* timestamp of last out-of-window ACK */
 
        u32     tsoffset;       /* timestamp offset */
 
@@ -340,6 +342,10 @@ struct tcp_timewait_sock {
        u32                       tw_rcv_wnd;
        u32                       tw_ts_offset;
        u32                       tw_ts_recent;
+
+       /* The time we sent the last out-of-window ACK: */
+       u32                       tw_last_oow_ack_time;
+
        long                      tw_ts_recent_stamp;
 #ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key     *tw_md5_key;
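
These timestamps exist so TCP can rate-limit ACKs provoked by out-of-window
segments (ACK-loop mitigation); conceptually the gate looks like this (a
sketch only, not the kernel's helper):

    /* allow at most one out-of-window ACK per 'limit' time units */
    static bool oow_rate_limited(u32 now, u32 *last_oow_ack_time, u32 limit)
    {
            if (*last_oow_ack_time && now - *last_oow_ack_time < limit)
                    return true;            /* suppress this ACK */
            *last_oow_ack_time = now;
            return false;
    }
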
index 203c2ad40d7184726b585b7c644b0a7f11ad6894..beebe3a02d43f5c527633cbd12af690c02e8288d 100644 (file)
@@ -110,6 +110,19 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
        return true;
 }
 
+static inline bool timeval_valid(const struct timeval *tv)
+{
+       /* Dates before 1970 are bogus */
+       if (tv->tv_sec < 0)
+               return false;
+
+       /* Can't have more microseconds than a second */
+       if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+               return false;
+
+       return true;
+}
+
 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 
 #define CURRENT_TIME           (current_kernel_time())
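
A caller such as the settimeofday() path can now reject malformed input up
front (illustrative):

    struct timeval tv = { .tv_sec = 10, .tv_usec = USEC_PER_SEC }; /* invalid */

    if (!timeval_valid(&tv))
            return -EINVAL;
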
index ee3277593222cf314f9b030eec6ec09d0c38c4e4..247cfdcc4b08bbf377ff5819ebd02683806b0c83 100644 (file)
@@ -49,11 +49,7 @@ struct udp_sock {
        unsigned int     corkflag;      /* Cork is required */
        __u8             encap_type;    /* Is this an Encapsulation socket? */
        unsigned char    no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
-                        no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
-                        convert_csum:1;/* On receive, convert checksum
-                                        * unnecessary to checksum complete
-                                        * if possible.
-                                        */
+                        no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
        /*
         * Following member retains the information to create a UDP header
         * when the socket is uncorked.
@@ -102,16 +98,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
        return udp_sk(sk)->no_check6_rx;
 }
 
-static inline void udp_set_convert_csum(struct sock *sk, bool val)
-{
-       udp_sk(sk)->convert_csum = val;
-}
-
-static inline bool udp_get_convert_csum(struct sock *sk)
-{
-       return udp_sk(sk)->convert_csum;
-}
-
 #define udp_portaddr_for_each_entry(__sk, node, list) \
        hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
 
index 1c5e453f7ea997364a4d26852a2b532ec3b8acd5..3e0cb4ea3905905cd3c6c22082754818f5f7798d 100644 (file)
@@ -135,10 +135,4 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                       int offset, int len);
-int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
-                     int offset, int len);
-
 #endif
index 5691f752ce8f0b6fdd83f43502621f0ca06eebca..63df3a2a8ce54aa184d8a7806f62d9e9cf121050 100644 (file)
@@ -74,7 +74,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
 ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
                        int mode);
 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
-                         void *iov, size_t iov_size, int mode);
+                         struct msghdr *msg, size_t iov_size, int mode);
 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
                          struct msghdr *msg, size_t iov_size, int mode);
 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
index a219be961c0a2cb7ab21ce08353bdb1c968d2b92..00048339c23e4f252ee6a4b15cd38b49b8032de4 100644 (file)
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
                      struct writeback_control *wbc, writepage_t writepage,
                      void *data);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
-void set_page_dirty_balance(struct page *page);
 void writeback_set_ratelimit(void);
 void tag_pages_for_writeback(struct address_space *mapping,
                             pgoff_t start, pgoff_t end);
index 58695ffeb13814786227f7b04d3a4930a8d98bdd..e00455aab18c2cd7274615eecc4248efc22cd4a8 100644 (file)
@@ -273,7 +273,7 @@ struct l2cap_ctrl {
 
 struct hci_dev;
 
-typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status);
+typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
 
 struct hci_req_ctrl {
        bool                    start;
index 1849a437f6e1aeba2414c96db45d3c18d3c224d6..8e54f825153c95832246c660521d62c56d2f4117 100644 (file)
@@ -102,6 +102,18 @@ enum {
         */
        HCI_QUIRK_FIXUP_BUFFER_SIZE,
 
+       /* When this quirk is set, then a controller that does not
+        * indicate support for Inquiry Result with RSSI is assumed to
+        * support it anyway. Some early Bluetooth 1.2 controllers had
+        * wrongly configured local features that require forcing
+        * them to enable this mode. Getting RSSI information with the
+        * inquiry responses is preferred since it allows for a better
+        * user experience.
+        *
+        * This quirk must be set before hci_register_dev is called.
+        */
+       HCI_QUIRK_FIXUP_INQUIRY_MODE,
+
        /* When this quirk is set, then the HCI Read Local Supported
         * Commands command is not supported. In general Bluetooth 1.2
         * and later controllers should support this command. However
@@ -172,8 +184,7 @@ enum {
  */
 enum {
        HCI_DUT_MODE,
-       HCI_FORCE_SC,
-       HCI_FORCE_LESC,
+       HCI_FORCE_BREDR_SMP,
        HCI_FORCE_STATIC_ADDR,
 };
 
@@ -844,11 +855,26 @@ struct hci_cp_set_event_flt {
 #define HCI_CONN_SETUP_AUTO_OFF        0x01
 #define HCI_CONN_SETUP_AUTO_ON 0x02
 
+#define HCI_OP_READ_STORED_LINK_KEY    0x0c0d
+struct hci_cp_read_stored_link_key {
+       bdaddr_t bdaddr;
+       __u8     read_all;
+} __packed;
+struct hci_rp_read_stored_link_key {
+       __u8     status;
+       __u8     max_keys;
+       __u8     num_keys;
+} __packed;
+
 #define HCI_OP_DELETE_STORED_LINK_KEY  0x0c12
 struct hci_cp_delete_stored_link_key {
        bdaddr_t bdaddr;
        __u8     delete_all;
 } __packed;
+struct hci_rp_delete_stored_link_key {
+       __u8     status;
+       __u8     num_keys;
+} __packed;
 
 #define HCI_MAX_NAME_LENGTH            248
 
index 3e7e5110f29893cef41736cc48615b4136330225..52863c3e0b132bc59224feef3f9e7acd678ef45a 100644 (file)
@@ -79,6 +79,8 @@ struct discovery_state {
        s8                      rssi;
        u16                     uuid_count;
        u8                      (*uuids)[16];
+       unsigned long           scan_start;
+       unsigned long           scan_duration;
 };
 
 struct hci_conn_hash {
@@ -145,6 +147,7 @@ struct oob_data {
        struct list_head list;
        bdaddr_t bdaddr;
        u8 bdaddr_type;
+       u8 present;
        u8 hash192[16];
        u8 rand192[16];
        u8 hash256[16];
@@ -205,6 +208,8 @@ struct hci_dev {
        __u16           lmp_subver;
        __u16           voice_setting;
        __u8            num_iac;
+       __u8            stored_max_keys;
+       __u8            stored_num_keys;
        __u8            io_capability;
        __s8            inq_tx_power;
        __u16           page_scan_interval;
@@ -230,6 +235,7 @@ struct hci_dev {
        __u16           conn_info_min_age;
        __u16           conn_info_max_age;
        __u8            ssp_debug_mode;
+       __u8            hw_error_code;
        __u32           clock;
 
        __u16           devid_source;
@@ -291,6 +297,7 @@ struct hci_dev {
 
        struct work_struct      power_on;
        struct delayed_work     power_off;
+       struct work_struct      error_reset;
 
        __u16                   discov_timeout;
        struct delayed_work     discov_off;
@@ -349,6 +356,7 @@ struct hci_dev {
        unsigned long           dev_flags;
 
        struct delayed_work     le_scan_disable;
+       struct delayed_work     le_scan_restart;
 
        __s8                    adv_tx_power;
        __u8                    adv_data[HCI_MAX_AD_LENGTH];
@@ -367,6 +375,7 @@ struct hci_dev {
        int (*setup)(struct hci_dev *hdev);
        int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
+       void (*hw_error)(struct hci_dev *hdev, u8 code);
        int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
 };
 
@@ -525,6 +534,8 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
        hdev->discovery.uuid_count = 0;
        kfree(hdev->discovery.uuids);
        hdev->discovery.uuids = NULL;
+       hdev->discovery.scan_start = 0;
+       hdev->discovery.scan_duration = 0;
 }
 
 bool hci_discovery_active(struct hci_dev *hdev);
@@ -779,7 +790,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn);
 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
                      bool initiator);
-int hci_conn_change_link_key(struct hci_conn *conn);
 int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
 
 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
@@ -1017,8 +1027,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 
 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
                                !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-#define bredr_sc_enabled(dev) ((lmp_sc_capable(dev) || \
-                               test_bit(HCI_FORCE_SC, &(dev)->dbg_flags)) && \
+#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
                               test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
 
 /* ----- HCI protocols ----- */
@@ -1325,6 +1334,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
 #define DISCOV_INTERLEAVED_TIMEOUT     5120    /* msec */
 #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
 #define DISCOV_BREDR_INQUIRY_LEN       0x08
+#define DISCOV_LE_RESTART_DELAY                msecs_to_jiffies(200)   /* msec */
 
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
 int mgmt_new_settings(struct hci_dev *hdev);
@@ -1369,7 +1379,6 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
 void mgmt_auth_failed(struct hci_conn *conn, u8 status);
 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
                                    u8 status);
 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
index 95c34d5180fa1ec656bf996ed034817674b34ecf..e218a30f206134776ab2eed1a19a98c0d31ef395 100644 (file)
@@ -301,10 +301,6 @@ struct mgmt_cp_user_passkey_neg_reply {
 #define MGMT_OP_READ_LOCAL_OOB_DATA    0x0020
 #define MGMT_READ_LOCAL_OOB_DATA_SIZE  0
 struct mgmt_rp_read_local_oob_data {
-       __u8    hash[16];
-       __u8    rand[16];
-} __packed;
-struct mgmt_rp_read_local_oob_ext_data {
        __u8    hash192[16];
        __u8    rand192[16];
        __u8    hash256[16];
index e01d903633eff269866dda0f23e65f60614adf11..f04cdbb7848e564062d714b9a6a9ba845c2440ab 100644 (file)
@@ -274,7 +274,6 @@ void bond_3ad_handle_link_change(struct slave *slave, char link);
 int  bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
 int  __bond_3ad_get_active_agg_info(struct bonding *bond,
                                    struct ad_info *ad_info);
-int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
 int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
                         struct slave *slave);
 int bond_3ad_set_carrier(struct bonding *bond);
index 983a94b86b954c90548df20fe6f574efb6359c70..4e17095ad46a1fb196ac3724e2e0c33603e4b3fa 100644 (file)
@@ -150,6 +150,12 @@ struct bond_parm_tbl {
        int mode;
 };
 
+struct netdev_notify_work {
+       struct delayed_work     work;
+       struct slave            *slave;
+       struct net_device       *dev;
+};
+
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
@@ -243,6 +249,8 @@ struct bonding {
 #define bond_slave_get_rtnl(dev) \
        ((struct slave *) rtnl_dereference(dev->rx_handler_data))
 
+void bond_queue_slave_event(struct slave *slave);
+
 struct bond_vlan_tag {
        __be16          vlan_proto;
        unsigned short  vlan_id;
@@ -315,6 +323,7 @@ static inline void bond_set_active_slave(struct slave *slave)
 {
        if (slave->backup) {
                slave->backup = 0;
+               bond_queue_slave_event(slave);
                rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
        }
 }
@@ -323,6 +332,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
 {
        if (!slave->backup) {
                slave->backup = 1;
+               bond_queue_slave_event(slave);
                rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
        }
 }
@@ -336,6 +346,7 @@ static inline void bond_set_slave_state(struct slave *slave,
        slave->backup = slave_state;
        if (notify) {
                rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
+               bond_queue_slave_event(slave);
                slave->should_notify = 0;
        } else {
                if (slave->should_notify)
@@ -490,6 +501,12 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
        return slave->inactive;
 }
 
+static inline void bond_set_slave_link_state(struct slave *slave, int state)
+{
+       slave->link = state;
+       bond_queue_slave_event(slave);
+}
+
 static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
 {
        struct in_device *in_dev;
@@ -525,6 +542,7 @@ void bond_sysfs_slave_del(struct slave *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
+int bond_set_carrier(struct bonding *bond);
 void bond_select_active_slave(struct bonding *bond);
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
 void bond_create_debugfs(void);
index 7b44ba0a76328eed71693790923f8ebf8f516c4c..64e09e1e809960be7daaef505e2e39dbab01c238 100644 (file)
@@ -1493,6 +1493,10 @@ struct cfg80211_match_set {
  * @rcu_head: RCU callback used to free the struct
  * @owner_nlportid: netlink portid of owner (if this should is a request
  *     owned by a particular socket)
+ * @delay: delay in seconds to use before starting the first scan
+ *     cycle.  The driver may ignore this parameter and start
+ *     immediately (or at any other time), if this feature is not
+ *     supported.
  */
 struct cfg80211_sched_scan_request {
        struct cfg80211_ssid *ssids;
@@ -1506,6 +1510,7 @@ struct cfg80211_sched_scan_request {
        struct cfg80211_match_set *match_sets;
        int n_match_sets;
        s32 min_rssi_thold;
+       u32 delay;
 
        u8 mac_addr[ETH_ALEN] __aligned(2);
        u8 mac_addr_mask[ETH_ALEN] __aligned(2);
index 7ee2df083542365e9d317fa1dbc2bbcfbbe34aa6..dc8fd81412bf319b7ba59c8b12c65c9363db75f6 100644 (file)
@@ -22,9 +22,9 @@ struct flow_keys {
                __be32 ports;
                __be16 port16[2];
        };
-       u16 thoff;
-       u16 n_proto;
-       u8 ip_proto;
+       u16     thoff;
+       __be16  n_proto;
+       u8      ip_proto;
 };
 
 bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
index 84125088c309afb988daa4b24368c2d9e0f02588..0574abd3db86bc0a796819a387dfc751adfcb04d 100644 (file)
@@ -27,13 +27,18 @@ struct genl_info;
  * @maxattr: maximum number of attributes supported
  * @netnsok: set to true if the family can handle network
  *     namespaces and should be presented in all of them
+ * @parallel_ops: operations can be called in parallel and aren't
+ *     synchronized by the core genetlink code
  * @pre_doit: called before an operation's doit callback, it may
  *     do additional, common, filtering and return an error
  * @post_doit: called after an operation's doit callback, it may
  *     undo operations done by pre_doit, for example release locks
  * @mcast_bind: a socket bound to the given multicast group (which
  *     is given as the offset into the groups array)
- * @mcast_unbind: a socket was unbound from the given multicast group
+ * @mcast_unbind: a socket was unbound from the given multicast group.
+ *     Note that unbind() will not be called symmetrically if the
+ *     generic netlink family is removed while there are still open
+ *     sockets.
  * @attrbuf: buffer to store parsed attributes
  * @family_list: family list
  * @mcgrps: multicast groups used by this family (private)
@@ -205,6 +210,23 @@ static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr,
                                   NLMSG_HDRLEN);
 }
 
+/**
+ * genlmsg_parse - parse attributes of a genetlink message
+ * @nlh: netlink message header
+ * @family: genetlink message family
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * */
+static inline int genlmsg_parse(const struct nlmsghdr *nlh,
+                               const struct genl_family *family,
+                               struct nlattr *tb[], int maxtype,
+                               const struct nla_policy *policy)
+{
+       return nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
+                          policy);
+}
+
 /**
  * genl_dump_check_consistent - check if sequence is consistent and advertise if not
  * @cb: netlink callback structure that stores the sequence number
@@ -245,9 +267,9 @@ static inline void *genlmsg_put_reply(struct sk_buff *skb,
  * @skb: socket buffer the message is stored in
  * @hdr: user specific header
  */
-static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
+static inline void genlmsg_end(struct sk_buff *skb, void *hdr)
 {
-       return nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+       nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
 }
 
 /**
index 112132cf8e2e878c0e4629c88e2065eadfda0f4f..14fb8d3390b4807ac2ed890efef079e0ca39ef54 100644 (file)
@@ -68,13 +68,12 @@ struct geneve_sock;
 typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
 
 struct geneve_sock {
-       struct hlist_node       hlist;
+       struct list_head        list;
        geneve_rcv_t            *rcv;
        void                    *rcv_data;
-       struct work_struct      del_work;
        struct socket           *sock;
        struct rcu_head         rcu;
-       atomic_t                refcnt;
+       int                     refcnt;
        struct udp_offload      udp_offloads;
 };
 
@@ -91,7 +90,7 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
                    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
                    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
                    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-                   bool xnet);
+                   bool csum, bool xnet);
 #endif /*ifdef CONFIG_INET */
 
 #endif /*ifdef__NET_GENEVE_H */
index 734d9b5f577a8036594251ced6e1af265fbc1c6a..0f712c0bc0bf27a910ca224ba050de5e7135fb30 100644 (file)
@@ -8,25 +8,23 @@
 struct gro_cell {
        struct sk_buff_head     napi_skbs;
        struct napi_struct      napi;
-} ____cacheline_aligned_in_smp;
+};
 
 struct gro_cells {
-       unsigned int            gro_cells_mask;
-       struct gro_cell         *cells;
+       struct gro_cell __percpu        *cells;
 };
 
 static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 {
-       struct gro_cell *cell = gcells->cells;
+       struct gro_cell *cell;
        struct net_device *dev = skb->dev;
 
-       if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
+       if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
                netif_rx(skb);
                return;
        }
 
-       if (skb_rx_queue_recorded(skb))
-               cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
+       cell = this_cpu_ptr(gcells->cells);
 
        if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
                atomic_long_inc(&dev->rx_dropped);
@@ -72,15 +70,12 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
 {
        int i;
 
-       gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
-       gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
-                               sizeof(struct gro_cell),
-                               GFP_KERNEL);
+       gcells->cells = alloc_percpu(struct gro_cell);
        if (!gcells->cells)
                return -ENOMEM;
 
-       for (i = 0; i <= gcells->gro_cells_mask; i++) {
-               struct gro_cell *cell = gcells->cells + i;
+       for_each_possible_cpu(i) {
+               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
                skb_queue_head_init(&cell->napi_skbs);
                netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
@@ -91,16 +86,16 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
 
 static inline void gro_cells_destroy(struct gro_cells *gcells)
 {
-       struct gro_cell *cell = gcells->cells;
        int i;
 
-       if (!cell)
+       if (!gcells->cells)
                return;
-       for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) {
+       for_each_possible_cpu(i) {
+               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
                netif_napi_del(&cell->napi);
                skb_queue_purge(&cell->napi_skbs);
        }
-       kfree(gcells->cells);
+       free_percpu(gcells->cells);
        gcells->cells = NULL;
 }
 
index 848e85cb5c6128ecfe101e386657ce355f507b5c..5976bdecf58b05b26980c76ae140d7c016ca939b 100644 (file)
@@ -98,7 +98,8 @@ struct inet_connection_sock {
        const struct tcp_congestion_ops *icsk_ca_ops;
        const struct inet_connection_sock_af_ops *icsk_af_ops;
        unsigned int              (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
-       __u8                      icsk_ca_state;
+       __u8                      icsk_ca_state:7,
+                                 icsk_ca_dst_locked:1;
        __u8                      icsk_retransmits;
        __u8                      icsk_pending;
        __u8                      icsk_backoff;
index a829b77523cf3f28704dbc593ebf112802886951..eb16c7beed1e9570168d1ccbc5a7cd9cf31dd52d 100644 (file)
@@ -16,7 +16,7 @@
 #ifndef _INET_SOCK_H
 #define _INET_SOCK_H
 
-
+#include <linux/bitops.h>
 #include <linux/kmemcheck.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -184,6 +184,7 @@ struct inet_sock {
                                mc_all:1,
                                nodefrag:1;
        __u8                    rcv_tos;
+       __u8                    convert_csum;
        int                     uc_index;
        int                     mc_index;
        __be32                  mc_addr;
@@ -194,6 +195,16 @@ struct inet_sock {
 #define IPCORK_OPT     1       /* ip-options has been held in ipcork.opt */
 #define IPCORK_ALLFRAG 2       /* always fragment (for ipv6 for now) */
 
+/* cmsg flags for inet */
+#define IP_CMSG_PKTINFO                BIT(0)
+#define IP_CMSG_TTL            BIT(1)
+#define IP_CMSG_TOS            BIT(2)
+#define IP_CMSG_RECVOPTS       BIT(3)
+#define IP_CMSG_RETOPTS                BIT(4)
+#define IP_CMSG_PASSSEC                BIT(5)
+#define IP_CMSG_ORIGDSTADDR    BIT(6)
+#define IP_CMSG_CHECKSUM       BIT(7)
+
 static inline struct inet_sock *inet_sk(const struct sock *sk)
 {
        return (struct inet_sock *)sk;
@@ -250,4 +261,20 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
        return flags;
 }
 
+static inline void inet_inc_convert_csum(struct sock *sk)
+{
+       inet_sk(sk)->convert_csum++;
+}
+
+static inline void inet_dec_convert_csum(struct sock *sk)
+{
+       if (inet_sk(sk)->convert_csum > 0)
+               inet_sk(sk)->convert_csum--;
+}
+
+static inline bool inet_get_convert_csum(struct sock *sk)
+{
+       return !!inet_sk(sk)->convert_csum;
+}
+
 #endif /* _INET_SOCK_H */
index 0bb620702929e7ad3b48f7aa40e5c73df3638141..025c61c0dffbfe9ddfcc2cd6f9eb38b2af504d3f 100644 (file)
@@ -39,11 +39,12 @@ struct inet_skb_parm {
        struct ip_options       opt;            /* Compiled IP options          */
        unsigned char           flags;
 
-#define IPSKB_FORWARDED                1
-#define IPSKB_XFRM_TUNNEL_SIZE 2
-#define IPSKB_XFRM_TRANSFORMED 4
-#define IPSKB_FRAG_COMPLETE    8
-#define IPSKB_REROUTED         16
+#define IPSKB_FORWARDED                BIT(0)
+#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
+#define IPSKB_XFRM_TRANSFORMED BIT(2)
+#define IPSKB_FRAG_COMPLETE    BIT(3)
+#define IPSKB_REROUTED         BIT(4)
+#define IPSKB_DOREDIRECT       BIT(5)
 
        u16                     frag_max_size;
 };
@@ -180,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
        return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
 }
 
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           const struct ip_options *sopt,
                           __be32 daddr, __be32 saddr,
                           const struct ip_reply_arg *arg,
@@ -537,7 +538,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
  */
 
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
 int ip_cmsg_send(struct net *net, struct msghdr *msg,
                 struct ipcm_cookie *ipc, bool allow_ipv6);
 int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -557,6 +558,11 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
                    u32 info);
 
+static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+{
+       ip_cmsg_recv_offset(msg, skb, 0);
+}
+
 bool icmp_global_allow(void);
 extern int sysctl_icmp_msgs_per_sec;
 extern int sysctl_icmp_msgs_burst;
index 8eea35d32a75b3927316fa38304faf63632bf8dc..20e80fa7bbdd5a0effd39ed6761ac167912f7da3 100644 (file)
@@ -74,6 +74,11 @@ struct fib6_node {
 #define FIB6_SUBTREE(fn)       ((fn)->subtree)
 #endif
 
+struct mx6_config {
+       const u32 *mx;
+       DECLARE_BITMAP(mx_valid, RTAX_MAX);
+};
+
 /*
  *     routing information
  *
@@ -291,9 +296,8 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
                    void *arg);
 
-int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
-            struct nlattr *mx, int mx_len);
-
+int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+            struct nl_info *info, struct mx6_config *mxc);
 int fib6_del(struct rt6_info *rt, struct nl_info *info);
 
 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
index 9326c41c2d7f9e0512e9172569ba407ca59853bd..76c091b53daef0c44105a99d843dba281fd08d31 100644 (file)
@@ -70,6 +70,7 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
                             const struct in6_addr *raddr);
+struct net *ip6_tnl_get_link_net(const struct net_device *dev);
 
 static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
index 25a59eb388a6493ea0b329a29ca4ccb7147018dd..2c47061a6954543abe462ece352e93cdd0538033 100644 (file)
@@ -97,7 +97,10 @@ struct ip_tunnel {
 #define TUNNEL_DONT_FRAGMENT    __cpu_to_be16(0x0100)
 #define TUNNEL_OAM             __cpu_to_be16(0x0200)
 #define TUNNEL_CRIT_OPT                __cpu_to_be16(0x0400)
-#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800)
+#define TUNNEL_GENEVE_OPT      __cpu_to_be16(0x0800)
+#define TUNNEL_VXLAN_OPT       __cpu_to_be16(0x1000)
+
+#define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
 
 struct tnl_ptk_info {
        __be16 flags;
@@ -138,6 +141,7 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
 int ip_tunnel_init(struct net_device *dev);
 void ip_tunnel_uninit(struct net_device *dev);
 void  ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
+struct net *ip_tunnel_get_link_net(const struct net_device *dev);
 int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname);
 
index 4292929392b0127479c49c5da3bb33f053f4428c..8ae7c9edbd3c90a17d541347a9ddb5cb22c6f662 100644 (file)
@@ -671,6 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
+u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
+                       struct in6_addr *src);
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
 void ipv6_proxy_select_ident(struct sk_buff *skb);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -708,7 +711,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                                        __be32 flowlabel, bool autolabel)
 {
        if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
-               __be32 hash;
+               u32 hash;
 
                hash = skb_get_hash(skb);
 
@@ -718,7 +721,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                 */
                hash ^= hash >> 12;
 
-               flowlabel = hash & IPV6_FLOWLABEL_MASK;
+               flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
        }
 
        return flowlabel;
@@ -788,6 +791,25 @@ int ip6_push_pending_frames(struct sock *sk);
 
 void ip6_flush_pending_frames(struct sock *sk);
 
+int ip6_send_skb(struct sk_buff *skb);
+
+struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
+                              struct inet_cork_full *cork,
+                              struct inet6_cork *v6_cork);
+struct sk_buff *ip6_make_skb(struct sock *sk,
+                            int getfrag(void *from, char *to, int offset,
+                                        int len, int odd, struct sk_buff *skb),
+                            void *from, int length, int transhdrlen,
+                            int hlimit, int tclass, struct ipv6_txoptions *opt,
+                            struct flowi6 *fl6, struct rt6_info *rt,
+                            unsigned int flags, int dontfrag);
+
+static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
+{
+       return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
+                             &inet6_sk(sk)->cork);
+}
+
 int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst);
index 275ee56152ade074418d5485fdc74ab38db8d8d3..d52914b75331a7bf2a384e10d9ec4a4ce427260a 100644 (file)
@@ -376,6 +376,12 @@ enum ieee80211_rssi_event {
  * @ssid_len: Length of SSID given in @ssid.
  * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
  * @txpower: TX power in dBm
+ * @txpower_type: TX power adjustment used to control per packet Transmit
+ *     Power Control (TPC) in lower driver for the current vif. In particular
+ *     TPC is enabled if value passed in %txpower_type is
+ *     NL80211_TX_POWER_LIMITED (allow using less than specified from
+ *     userspace), whereas TPC is disabled if %txpower_type is set to
+ *     NL80211_TX_POWER_FIXED (use value configured from userspace)
  * @p2p_noa_attr: P2P NoA attribute for P2P powersave
  */
 struct ieee80211_bss_conf {
@@ -411,6 +417,7 @@ struct ieee80211_bss_conf {
        size_t ssid_len;
        bool hidden_ssid;
        int txpower;
+       enum nl80211_tx_power_setting txpower_type;
        struct ieee80211_p2p_noa_attr p2p_noa_attr;
 };
 
@@ -1287,8 +1294,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
  *     that the key is pairwise rather then a shared key.
  * @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
- *     CCMP key if it requires CCMP encryption of management frames (MFP) to
- *     be done in software.
+ *     CCMP/GCMP key if it requires CCMP/GCMP encryption of management frames
+ *     (MFP) to be done in software.
  * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
  *     if space should be prepared for the IV, but the IV
  *     itself should not be generated. Do not set together with
@@ -1303,7 +1310,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  *     RX, if your crypto engine can't deal with TX you can also set the
  *     %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
  * @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
- *     driver for a CCMP key to indicate that is requires IV generation
+ *     driver for a CCMP/GCMP key to indicate that is requires IV generation
  *     only for managment frames (MFP).
  * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
  *     driver for a key to indicate that sufficient tailroom must always
@@ -1634,6 +1641,12 @@ struct ieee80211_tx_control {
  *     be created.  It is expected user-space will create vifs as
  *     desired (and thus have them named as desired).
  *
+ * @IEEE80211_HW_SW_CRYPTO_CONTROL: The driver wants to control which of the
+ *     crypto algorithms can be done in software - so don't automatically
+ *     try to fall back to it if hardware crypto fails, but do so only if
+ *     the driver returns 1. This also forces the driver to advertise its
+ *     supported cipher suites.
+ *
  * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
  *     queue mapping in order to use different queues (not just one per AC)
  *     for different virtual interfaces. See the doc section on HW queue
@@ -1681,6 +1694,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_MFP_CAPABLE                        = 1<<13,
        IEEE80211_HW_WANT_MONITOR_VIF                   = 1<<14,
        IEEE80211_HW_NO_AUTO_VIF                        = 1<<15,
+       IEEE80211_HW_SW_CRYPTO_CONTROL                  = 1<<16,
        /* free slots */
        IEEE80211_HW_REPORTS_TX_ACK_STATUS              = 1<<18,
        IEEE80211_HW_CONNECTION_MONITOR                 = 1<<19,
@@ -1955,6 +1969,11 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
  * added; if you return 0 then hw_key_idx must be assigned to the
  * hardware key index, you are free to use the full u8 range.
  *
+ * Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is
+ * set, mac80211 will not automatically fall back to software crypto if
+ * enabling hardware crypto failed. The set_key() call may also return the
+ * value 1 to permit this specific key/algorithm to be done in software.
+ *
  * When the cmd is %DISABLE_KEY then it must succeed.
  *
  * Note that it is permissible to not decrypt a frame even if a key
@@ -4079,6 +4098,10 @@ void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
  *     reverse order than in packet)
  * @aes_cmac: PN data, most significant byte first (big endian,
  *     reverse order than in packet)
+ * @aes_gmac: PN data, most significant byte first (big endian,
+ *     reverse order than in packet)
+ * @gcmp: PN data, most significant byte first (big endian,
+ *     reverse order than in packet)
  */
 struct ieee80211_key_seq {
        union {
@@ -4092,6 +4115,12 @@ struct ieee80211_key_seq {
                struct {
                        u8 pn[6];
                } aes_cmac;
+               struct {
+                       u8 pn[6];
+               } aes_gmac;
+               struct {
+                       u8 pn[6];
+               } gcmp;
        };
 };
 
@@ -4116,7 +4145,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
  * ieee80211_get_key_rx_seq - get key RX sequence counter
  *
  * @keyconf: the parameter passed with the set key
- * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
  *     the value on TID 0 is also used for non-QoS frames. For
  *     CMAC, only TID 0 is valid.
  * @seq: buffer to receive the sequence data
@@ -4152,7 +4181,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
  * ieee80211_set_key_rx_seq - set key RX sequence counter
  *
  * @keyconf: the parameter passed with the set key
- * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
  *     the value on TID 0 is also used for non-QoS frames. For
  *     CMAC, only TID 0 is valid.
  * @seq: new sequence data
index 2e8756b8c77543391b1c3381fa442c2a2a1c582e..36faf4990c4b6f2604fd2b00178c941d75f540a8 100644 (file)
@@ -60,6 +60,7 @@ struct net {
        struct list_head        exit_list;      /* Use only net_mutex */
 
        struct user_namespace   *user_ns;       /* Owning user namespace */
+       struct idr              netns_ids;
 
        struct ns_common        ns;
 
@@ -290,6 +291,9 @@ static inline struct net *read_pnet(struct net * const *pnet)
 #define __net_initconst        __initconst
 #endif
 
+int peernet2id(struct net *net, struct net *peer);
+struct net *get_net_ns_by_id(struct net *net, int id);
+
 struct pernet_operations {
        struct list_head list;
        int (*init)(struct net *net);
index f0daed2b54d139146649b44f3973969ebee57e60..74f271a172dd181ad0dd521395e5480f29250cff 100644 (file)
@@ -191,8 +191,6 @@ __nf_conntrack_find(struct net *net, u16 zone,
 int nf_conntrack_hash_check_insert(struct nf_conn *ct);
 bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
 
-void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
-
 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num, struct nf_conntrack_tuple *tuple);
 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
index 3ae969e3acf016474413e1209381c60091befe1a..9eaaa788458607004cb5f160e77c38de02da17ec 100644 (file)
@@ -530,6 +530,8 @@ enum nft_chain_type {
 
 int nft_chain_validate_dependency(const struct nft_chain *chain,
                                  enum nft_chain_type type);
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+                             unsigned int hook_flags);
 
 struct nft_stats {
        u64                     bytes;
index 64158353ecb2750a3165dc07e915755ccb801522..e010ee8da41ded34ae6e407cfab40734031b73aa 100644 (file)
@@ -490,14 +490,10 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
  * Corrects the netlink message header to include the appeneded
  * attributes. Only necessary if attributes have been added to
  * the message.
- *
- * Returns the total data length of the skb.
  */
-static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
+static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
-
-       return skb->len;
 }
 
 /**
@@ -520,8 +516,10 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
  */
 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
 {
-       if (mark)
+       if (mark) {
+               WARN_ON((unsigned char *) mark < skb->data);
                skb_trim(skb, (unsigned char *) mark - skb->data);
+       }
 }
 
 /**
index 24945cefc4fde6bfaf9c4560080c91b2e3b12d0d..e0bdcb1473269d0d18e67b77fb773137bbbac087 100644 (file)
@@ -48,10 +48,12 @@ struct netns_ipv4 {
        struct hlist_head       *fib_table_hash;
        struct sock             *fibnl;
 
-       struct sock             **icmp_sk;
+       struct sock  * __percpu *icmp_sk;
+
        struct inet_peer_base   *peers;
        struct tcpm_hash_bucket *tcp_metrics_hash;
        unsigned int            tcp_metrics_hash_log;
+       struct sock  * __percpu *tcp_sk;
        struct netns_frags      frags;
 #ifdef CONFIG_NETFILTER
        struct xt_table         *iptable_filter;
index 14bd0e1c47fae31ba0ae70669dde92741a81cc64..ab672b537dd4618dc25c31d1e10f400c96284fd2 100644 (file)
@@ -51,8 +51,10 @@ struct nfc_hci_ops {
        int (*tm_send)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
        int (*check_presence)(struct nfc_hci_dev *hdev,
                              struct nfc_target *target);
-       int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+       int (*event_received)(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
                              struct sk_buff *skb);
+       void (*cmd_received)(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+                           struct sk_buff *skb);
        int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name);
        int (*discover_se)(struct nfc_hci_dev *dev);
        int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
@@ -63,8 +65,10 @@ struct nfc_hci_ops {
 };
 
 /* Pipes */
-#define NFC_HCI_INVALID_PIPE   0x80
 #define NFC_HCI_DO_NOT_CREATE_PIPE     0x81
+#define NFC_HCI_INVALID_PIPE   0x80
+#define NFC_HCI_INVALID_GATE   0xFF
+#define NFC_HCI_INVALID_HOST   0x80
 #define NFC_HCI_LINK_MGMT_PIPE 0x00
 #define NFC_HCI_ADMIN_PIPE     0x01
 
@@ -73,7 +77,13 @@ struct nfc_hci_gate {
        u8 pipe;
 };
 
+struct nfc_hci_pipe {
+       u8 gate;
+       u8 dest_host;
+};
+
 #define NFC_HCI_MAX_CUSTOM_GATES       50
+#define NFC_HCI_MAX_PIPES              127
 struct nfc_hci_init_data {
        u8 gate_count;
        struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
@@ -125,6 +135,7 @@ struct nfc_hci_dev {
        void *clientdata;
 
        u8 gate2pipe[NFC_HCI_MAX_GATES];
+       struct nfc_hci_pipe pipes[NFC_HCI_MAX_PIPES];
 
        u8 sw_romlib;
        u8 sw_patch;
@@ -167,6 +178,8 @@ void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
 void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
 
 int nfc_hci_result_to_errno(u8 result);
+void nfc_hci_reset_pipes(struct nfc_hci_dev *dev);
+void nfc_hci_reset_pipes_per_host(struct nfc_hci_dev *hdev, u8 host);
 
 /* Host IDs */
 #define NFC_HCI_HOST_CONTROLLER_ID     0x00
@@ -219,6 +232,12 @@ int nfc_hci_result_to_errno(u8 result);
 #define NFC_HCI_EVT_POST_DATA                  0x02
 #define NFC_HCI_EVT_HOT_PLUG                   0x03
 
+/* Generic commands */
+#define NFC_HCI_ANY_SET_PARAMETER      0x01
+#define NFC_HCI_ANY_GET_PARAMETER      0x02
+#define NFC_HCI_ANY_OPEN_PIPE          0x03
+#define NFC_HCI_ANY_CLOSE_PIPE         0x04
+
 /* Reader RF gates events */
 #define NFC_HCI_EVT_READER_REQUESTED   0x10
 #define NFC_HCI_EVT_END_OPERATION      0x11
@@ -249,8 +268,6 @@ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
 int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
                           const u8 *param, size_t param_len,
                           data_exchange_cb_t cb, void *cb_context);
-int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
-                         const u8 *param, size_t param_len);
 int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
                       const u8 *param, size_t param_len);
 int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate);
index e7257a4653b40a9c4a1c15d938a0ede07ac94fcd..a2f2f3d3196dfc8c3b10719d4833b2c084d2bf64 100644 (file)
 #define NCI_STATUS_NFCEE_PROTOCOL_ERROR                                0xc2
 #define NCI_STATUS_NFCEE_TIMEOUT_ERROR                         0xc3
 
+/* NFCEE Interface/Protocols */
+#define NCI_NFCEE_INTERFACE_APDU           0x00
+#define NCI_NFCEE_INTERFACE_HCI_ACCESS     0x01
+#define NCI_NFCEE_INTERFACE_TYPE3_CMD_SET  0x02
+#define NCI_NFCEE_INTERFACE_TRANSPARENT        0x03
+
+/* Destination type */
+#define NCI_DESTINATION_NFCC_LOOPBACK      0x01
+#define NCI_DESTINATION_REMOTE_NFC_ENDPOINT    0x02
+#define NCI_DESTINATION_NFCEE              0x03
+
+/* Destination-specific parameters type */
+#define NCI_DESTINATION_SPECIFIC_PARAM_RF_TYPE     0x00
+#define NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE  0x01
+
+/* NFCEE Discovery Action */
+#define NCI_NFCEE_DISCOVERY_ACTION_DISABLE                     0x00
+#define NCI_NFCEE_DISCOVERY_ACTION_ENABLE                      0x01
+
 /* NCI RF Technology and Mode */
 #define NCI_NFC_A_PASSIVE_POLL_MODE                            0x00
 #define NCI_NFC_B_PASSIVE_POLL_MODE                            0x01
@@ -224,6 +243,28 @@ struct nci_core_set_config_cmd {
        struct  set_config_param param; /* support 1 param per cmd is enough */
 } __packed;
 
+#define NCI_OP_CORE_CONN_CREATE_CMD    nci_opcode_pack(NCI_GID_CORE, 0x04)
+#define DEST_SPEC_PARAMS_ID_INDEX      0
+#define DEST_SPEC_PARAMS_PROTOCOL_INDEX        1
+struct dest_spec_params {
+       __u8    id;
+       __u8    protocol;
+} __packed;
+
+struct core_conn_create_dest_spec_params {
+       __u8    type;
+       __u8    length;
+       __u8    value[0];
+} __packed;
+
+struct nci_core_conn_create_cmd {
+       __u8    destination_type;
+       __u8    number_destination_params;
+       struct core_conn_create_dest_spec_params params[0];
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_CMD     nci_opcode_pack(NCI_GID_CORE, 0x05)
+
 #define NCI_OP_RF_DISCOVER_MAP_CMD     nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 struct disc_map_config {
        __u8    rf_protocol;
@@ -260,6 +301,19 @@ struct nci_rf_deactivate_cmd {
        __u8    type;
 } __packed;
 
+#define NCI_OP_NFCEE_DISCOVER_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_discover_cmd {
+       __u8    discovery_action;
+} __packed;
+
+#define NCI_OP_NFCEE_MODE_SET_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
+#define NCI_NFCEE_DISABLE      0x00
+#define NCI_NFCEE_ENABLE       0x01
+struct nci_nfcee_mode_set_cmd {
+       __u8    nfcee_id;
+       __u8    nfcee_mode;
+} __packed;
+
 /* ----------------------- */
 /* ---- NCI Responses ---- */
 /* ----------------------- */
@@ -295,6 +349,16 @@ struct nci_core_set_config_rsp {
        __u8    params_id[0];   /* variable size array */
 } __packed;
 
+#define NCI_OP_CORE_CONN_CREATE_RSP    nci_opcode_pack(NCI_GID_CORE, 0x04)
+struct nci_core_conn_create_rsp {
+       __u8    status;
+       __u8    max_ctrl_pkt_payload_len;
+       __u8    credits_cnt;
+       __u8    conn_id;
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_RSP     nci_opcode_pack(NCI_GID_CORE, 0x05)
+
 #define NCI_OP_RF_DISCOVER_MAP_RSP     nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 
 #define NCI_OP_RF_DISCOVER_RSP         nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -303,6 +367,13 @@ struct nci_core_set_config_rsp {
 
 #define NCI_OP_RF_DEACTIVATE_RSP       nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
 
+#define NCI_OP_NFCEE_DISCOVER_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_discover_rsp {
+       __u8    status;
+       __u8    num_nfcee;
+} __packed;
+
+#define NCI_OP_NFCEE_MODE_SET_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
 /* --------------------------- */
 /* ---- NCI Notifications ---- */
 /* --------------------------- */
@@ -430,4 +501,30 @@ struct nci_rf_deactivate_ntf {
        __u8    reason;
 } __packed;
 
+#define NCI_OP_RF_NFCEE_ACTION_NTF     nci_opcode_pack(NCI_GID_RF_MGMT, 0x09)
+struct nci_rf_nfcee_action_ntf {
+       __u8 nfcee_id;
+       __u8 trigger;
+       __u8 supported_data_length;
+       __u8 supported_data[0];
+} __packed;
+
+#define NCI_OP_NFCEE_DISCOVER_NTF nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_supported_protocol {
+       __u8    num_protocol;
+       __u8    supported_protocol[0];
+} __packed;
+
+struct nci_nfcee_information_tlv {
+       __u8    num_tlv;
+       __u8    information_tlv[0];
+} __packed;
+
+struct nci_nfcee_discover_ntf {
+       __u8    nfcee_id;
+       __u8    nfcee_status;
+       struct nci_nfcee_supported_protocol supported_protocols;
+       struct nci_nfcee_information_tlv        information_tlv;
+} __packed;
+
 #endif /* __NCI_H */
index 9e51bb4d841ea4915c2e70f0d7187b656ae27cab..ff87f8611fa3246d4570c9c780d29f8ee4ff0f6c 100644 (file)
@@ -78,15 +78,107 @@ struct nci_ops {
        int   (*se_io)(struct nci_dev *ndev, u32 se_idx,
                                u8 *apdu, size_t apdu_length,
                                se_io_cb_t cb, void *cb_context);
+       int   (*hci_load_session)(struct nci_dev *ndev);
+       void  (*hci_event_received)(struct nci_dev *ndev, u8 pipe, u8 event,
+                                   struct sk_buff *skb);
+       void  (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
+                                 struct sk_buff *skb);
 };
 
 #define NCI_MAX_SUPPORTED_RF_INTERFACES                4
 #define NCI_MAX_DISCOVERED_TARGETS             10
+#define NCI_MAX_NUM_NFCEE   255
+#define NCI_MAX_CONN_ID                7
+
+struct nci_conn_info {
+       struct list_head list;
+       __u8    id; /* can be an RF Discovery ID or an NFCEE ID */
+       __u8    conn_id;
+       __u8    max_pkt_payload_len;
+
+       atomic_t credits_cnt;
+       __u8     initial_num_credits;
+
+       data_exchange_cb_t      data_exchange_cb;
+       void *data_exchange_cb_context;
+
+       struct sk_buff *rx_skb;
+};
+
+#define NCI_INVALID_CONN_ID 0x80
+
+#define NCI_HCI_ANY_OPEN_PIPE      0x03
+
+/* Gates */
+#define NCI_HCI_ADMIN_GATE         0x00
+#define NCI_HCI_LINK_MGMT_GATE     0x06
+
+/* Pipes */
+#define NCI_HCI_LINK_MGMT_PIPE             0x00
+#define NCI_HCI_ADMIN_PIPE                 0x01
+
+/* Generic responses */
+#define NCI_HCI_ANY_OK                     0x00
+#define NCI_HCI_ANY_E_NOT_CONNECTED        0x01
+#define NCI_HCI_ANY_E_CMD_PAR_UNKNOWN      0x02
+#define NCI_HCI_ANY_E_NOK                  0x03
+#define NCI_HCI_ANY_E_PIPES_FULL           0x04
+#define NCI_HCI_ANY_E_REG_PAR_UNKNOWN      0x05
+#define NCI_HCI_ANY_E_PIPE_NOT_OPENED      0x06
+#define NCI_HCI_ANY_E_CMD_NOT_SUPPORTED    0x07
+#define NCI_HCI_ANY_E_INHIBITED            0x08
+#define NCI_HCI_ANY_E_TIMEOUT              0x09
+#define NCI_HCI_ANY_E_REG_ACCESS_DENIED    0x0a
+#define NCI_HCI_ANY_E_PIPE_ACCESS_DENIED   0x0b
+
+#define NCI_HCI_DO_NOT_OPEN_PIPE           0x81
+#define NCI_HCI_INVALID_PIPE               0x80
+#define NCI_HCI_INVALID_GATE               0xFF
+#define NCI_HCI_INVALID_HOST               0x80
+
+#define NCI_HCI_MAX_CUSTOM_GATES   50
+#define NCI_HCI_MAX_PIPES          127
+
+struct nci_hci_gate {
+       u8 gate;
+       u8 pipe;
+       u8 dest_host;
+} __packed;
+
+struct nci_hci_pipe {
+       u8 gate;
+       u8 host;
+} __packed;
+
+struct nci_hci_init_data {
+       u8 gate_count;
+       struct nci_hci_gate gates[NCI_HCI_MAX_CUSTOM_GATES];
+       char session_id[9];
+};
+
+#define NCI_HCI_MAX_GATES          256
+
+struct nci_hci_dev {
+       u8 nfcee_id;
+       struct nci_dev *ndev;
+       struct nci_conn_info *conn_info;
+
+       struct nci_hci_init_data init_data;
+       struct nci_hci_pipe pipes[NCI_HCI_MAX_PIPES];
+       u8 gate2pipe[NCI_HCI_MAX_GATES];
+       int expected_pipes;
+       int count_pipes;
+
+       struct sk_buff_head rx_hcp_frags;
+       struct work_struct msg_rx_work;
+       struct sk_buff_head msg_rx_queue;
+};
 
 /* NCI Core structures */
 struct nci_dev {
        struct nfc_dev          *nfc_dev;
        struct nci_ops          *ops;
+       struct nci_hci_dev      *hci_dev;
 
        int                     tx_headroom;
        int                     tx_tailroom;
@@ -95,7 +187,10 @@ struct nci_dev {
        unsigned long           flags;
 
        atomic_t                cmd_cnt;
-       atomic_t                credits_cnt;
+       __u8                    cur_conn_id;
+
+       struct list_head        conn_info_list;
+       struct nci_conn_info    *rf_conn_info;
 
        struct timer_list       cmd_timer;
        struct timer_list       data_timer;
@@ -141,13 +236,10 @@ struct nci_dev {
        __u8                    manufact_id;
        __u32                   manufact_specific_info;
 
-       /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */
-       __u8                    max_data_pkt_payload_size;
-       __u8                    initial_num_credits;
+       /* Save RF Discovery ID or NFCEE ID under conn_create */
+       __u8                    cur_id;
 
        /* stored during nci_data_exchange */
-       data_exchange_cb_t      data_exchange_cb;
-       void                    *data_exchange_cb_context;
        struct sk_buff          *rx_data_reassembly;
 
        /* stored during intf_activated_ntf */
@@ -163,9 +255,36 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
 void nci_free_device(struct nci_dev *ndev);
 int nci_register_device(struct nci_dev *ndev);
 void nci_unregister_device(struct nci_dev *ndev);
+int nci_request(struct nci_dev *ndev,
+               void (*req)(struct nci_dev *ndev,
+                           unsigned long opt),
+               unsigned long opt, __u32 timeout);
 int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
 int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
 
+int nci_nfcee_discover(struct nci_dev *ndev, u8 action);
+int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode);
+int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
+                        u8 number_destination_params,
+                        size_t params_len,
+                        struct core_conn_create_dest_spec_params *params);
+int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id);
+
+struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
+                      const u8 *param, size_t param_len);
+int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
+                    u8 cmd, const u8 *param, size_t param_len,
+                    struct sk_buff **skb);
+int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe);
+int nci_hci_connect_gate(struct nci_dev *ndev, u8 dest_host,
+                        u8 dest_gate, u8 pipe);
+int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
+                     const u8 *param, size_t param_len);
+int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
+                     struct sk_buff **skb);
+int nci_hci_dev_session_init(struct nci_dev *ndev);
+
 static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
                                            unsigned int len,
                                            gfp_t how)
@@ -200,7 +319,9 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
 int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
 int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
 void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
-                               int err);
+                               __u8 conn_id, int err);
+void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err);
+
 void nci_clear_target_list(struct nci_dev *ndev);
 
 /* ----- NCI requests ----- */
@@ -209,6 +330,8 @@ void nci_clear_target_list(struct nci_dev *ndev);
 #define NCI_REQ_CANCELED       2
 
 void nci_req_complete(struct nci_dev *ndev, int result);
+struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
+                                                  int conn_id);
 
 /* ----- NCI status code ----- */
 int nci_to_errno(__u8 code);
index 12adb817c27a3f157f351915e38ecc131fd52e82..73190e65d5c13aa7217cce910203f4bd5f63d385 100644 (file)
@@ -135,6 +135,31 @@ struct nfc_se {
        u16 state;
 };
 
+/**
+ * nfc_evt_transaction - A struct for NFC secure element event transaction.
+ *
+ * @aid: The application identifier triggering the event
+ *
+ * @aid_len: The application identifier length [5:16]
+ *
+ * @params: The application parameters transmitted during the transaction
+ *
+ * @params_len: The applications parameters length [0:255]
+ *
+ */
+#define NFC_MIN_AID_LENGTH     5
+#define        NFC_MAX_AID_LENGTH      16
+#define NFC_MAX_PARAMS_LENGTH  255
+
+#define NFC_EVT_TRANSACTION_AID_TAG    0x81
+#define NFC_EVT_TRANSACTION_PARAMS_TAG 0x82
+struct nfc_evt_transaction {
+       u32 aid_len;
+       u8 aid[NFC_MAX_AID_LENGTH];
+       u8 params_len;
+       u8 params[NFC_MAX_PARAMS_LENGTH];
+} __packed;
+
 struct nfc_genl_data {
        u32 poll_req_portid;
        struct mutex genl_data_mutex;
@@ -262,6 +287,8 @@ int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
 
 void nfc_driver_failure(struct nfc_dev *dev, int err);
 
+int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx,
+                      struct nfc_evt_transaction *evt_transaction);
 int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
 int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
 struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
index f074060bc5de763a3488054db67a67e7a0a9ab00..cc16d413f681c077f743547af38ab8d3293a0007 100644 (file)
@@ -59,7 +59,7 @@ extern struct pingv6_ops pingv6_ops;
 
 struct pingfakehdr {
        struct icmphdr icmph;
-       struct iovec *iov;
+       struct msghdr *msg;
        sa_family_t family;
        __wsum wcheck;
 };
index 27a33833ff4a942a2dde57140a6d67d520ffd7b1..2342bf12cb78a07fb44cbd389fb548cf38801ec3 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/jiffies.h>
 #include <linux/ktime.h>
+#include <linux/if_vlan.h>
 #include <net/sch_generic.h>
 
 struct qdisc_walker {
@@ -114,6 +115,17 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
 int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                struct tcf_result *res);
 
+static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
+{
+       /* We need to take extra care in case the skb came via
+        * vlan accelerated path. In that case, use skb->vlan_proto
+        * as the original vlan header was already stripped.
+        */
+       if (skb_vlan_tag_present(skb))
+               return skb->vlan_proto;
+       return skb->protocol;
+}
+
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
index b17cf28f996e6ab3cee2b73b6747eafde6f02380..fe22d03afb6a218b6b2dfa7d5329632b8d4936ae 100644 (file)
@@ -46,6 +46,7 @@
 
 struct fib_nh;
 struct fib_info;
+struct uncached_list;
 struct rtable {
        struct dst_entry        dst;
 
@@ -64,6 +65,7 @@ struct rtable {
        u32                     rt_pmtu;
 
        struct list_head        rt_uncached;
+       struct uncached_list    *rt_uncached_list;
 };
 
 static inline bool rt_is_input_route(const struct rtable *rt)
index e21b9f9653c011fe11634e2e5e745d4e5d7a9bf9..6c6d5393fc349b46c384aa033554e4d12682040d 100644 (file)
@@ -46,6 +46,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
  *                         to create when creating a new device.
  *     @get_num_rx_queues: Function to determine number of receive queues
  *                         to create when creating a new device.
+ *     @get_link_net: Function to get the i/o netns of the device
  */
 struct rtnl_link_ops {
        struct list_head        list;
@@ -93,6 +94,7 @@ struct rtnl_link_ops {
        int                     (*fill_slave_info)(struct sk_buff *skb,
                                                   const struct net_device *dev,
                                                   const struct net_device *slave_dev);
+       struct net              *(*get_link_net)(const struct net_device *dev);
 };
 
 int __rtnl_link_register(struct rtnl_link_ops *ops);
index 3d282cbb66bf1015d28f140ed3bc031ac43afaed..c605d305c577074d11bee6f19479dda8a4949ee3 100644 (file)
@@ -79,6 +79,9 @@ struct Qdisc {
        struct netdev_queue     *dev_queue;
 
        struct gnet_stats_rate_est64    rate_est;
+       struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+       struct gnet_stats_queue __percpu *cpu_qstats;
+
        struct Qdisc            *next_sched;
        struct sk_buff          *gso_skb;
        /*
@@ -86,15 +89,9 @@ struct Qdisc {
         */
        unsigned long           state;
        struct sk_buff_head     q;
-       union {
-               struct gnet_stats_basic_packed bstats;
-               struct gnet_stats_basic_cpu __percpu *cpu_bstats;
-       } __packed;
+       struct gnet_stats_basic_packed bstats;
        unsigned int            __state;
-       union {
-               struct gnet_stats_queue qstats;
-               struct gnet_stats_queue __percpu *cpu_qstats;
-       } __packed;
+       struct gnet_stats_queue qstats;
        struct rcu_head         rcu_head;
        int                     padded;
        atomic_t                refcnt;
index 2210fec65669c870384fe87f1d536eca34ab3586..e13824570b0fdc8bf8d4f45de506e8a53aca7adc 100644 (file)
@@ -857,18 +857,6 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
 #endif
 }
 
-static inline void sock_rps_reset_flow_hash(__u32 hash)
-{
-#ifdef CONFIG_RPS
-       struct rps_sock_flow_table *sock_flow_table;
-
-       rcu_read_lock();
-       sock_flow_table = rcu_dereference(rps_sock_flow_table);
-       rps_reset_sock_flow(sock_flow_table, hash);
-       rcu_read_unlock();
-#endif
-}
-
 static inline void sock_rps_record_flow(const struct sock *sk)
 {
 #ifdef CONFIG_RPS
@@ -876,28 +864,18 @@ static inline void sock_rps_record_flow(const struct sock *sk)
 #endif
 }
 
-static inline void sock_rps_reset_flow(const struct sock *sk)
-{
-#ifdef CONFIG_RPS
-       sock_rps_reset_flow_hash(sk->sk_rxhash);
-#endif
-}
-
 static inline void sock_rps_save_rxhash(struct sock *sk,
                                        const struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-       if (unlikely(sk->sk_rxhash != skb->hash)) {
-               sock_rps_reset_flow(sk);
+       if (unlikely(sk->sk_rxhash != skb->hash))
                sk->sk_rxhash = skb->hash;
-       }
 #endif
 }
 
 static inline void sock_rps_reset_rxhash(struct sock *sk)
 {
 #ifdef CONFIG_RPS
-       sock_rps_reset_flow(sk);
        sk->sk_rxhash = 0;
 #endif
 }
@@ -1374,29 +1352,6 @@ void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
 #define SOCK_BINDADDR_LOCK     4
 #define SOCK_BINDPORT_LOCK     8
 
-/* sock_iocb: used to kick off async processing of socket ios */
-struct sock_iocb {
-       struct list_head        list;
-
-       int                     flags;
-       int                     size;
-       struct socket           *sock;
-       struct sock             *sk;
-       struct scm_cookie       *scm;
-       struct msghdr           *msg, async_msg;
-       struct kiocb            *kiocb;
-};
-
-static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
-{
-       return (struct sock_iocb *)iocb->private;
-}
-
-static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
-{
-       return si->kiocb;
-}
-
 struct socket_alloc {
        struct socket socket;
        struct inode vfs_inode;
@@ -1826,27 +1781,25 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 }
 
 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
-                                          char __user *from, char *to,
+                                          struct iov_iter *from, char *to,
                                           int copy, int offset)
 {
        if (skb->ip_summed == CHECKSUM_NONE) {
-               int err = 0;
-               __wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
-               if (err)
-                       return err;
+               __wsum csum = 0;
+               if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+                       return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, offset);
        } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
-               if (!access_ok(VERIFY_READ, from, copy) ||
-                   __copy_from_user_nocache(to, from, copy))
+               if (copy_from_iter_nocache(to, copy, from) != copy)
                        return -EFAULT;
-       } else if (copy_from_user(to, from, copy))
+       } else if (copy_from_iter(to, copy, from) != copy)
                return -EFAULT;
 
        return 0;
 }
 
 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
-                                      char __user *from, int copy)
+                                      struct iov_iter *from, int copy)
 {
        int err, offset = skb->len;
 
@@ -1858,7 +1811,7 @@ static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
        return err;
 }
 
-static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
+static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
                                           struct sk_buff *skb,
                                           struct page *page,
                                           int off, int copy)
@@ -2262,6 +2215,7 @@ bool sk_net_capable(const struct sock *sk, int cap);
 extern __u32 sysctl_wmem_max;
 extern __u32 sysctl_rmem_max;
 
+extern int sysctl_tstamp_allow_data;
 extern int sysctl_optmem_max;
 
 extern __u32 sysctl_wmem_default;
index 8a6d1641fd9bb8d1c3ddc053c479a9f0531b5b18..cfcdac2e5d253ef431bc18709cc5e7edd0e47e6f 100644 (file)
 #define _LINUX_SWITCHDEV_H_
 
 #include <linux/netdevice.h>
+#include <linux/notifier.h>
+
+enum netdev_switch_notifier_type {
+       NETDEV_SWITCH_FDB_ADD = 1,
+       NETDEV_SWITCH_FDB_DEL,
+};
+
+struct netdev_switch_notifier_info {
+       struct net_device *dev;
+};
+
+struct netdev_switch_notifier_fdb_info {
+       struct netdev_switch_notifier_info info; /* must be first */
+       const unsigned char *addr;
+       u16 vid;
+};
+
+static inline struct net_device *
+netdev_switch_notifier_info_to_dev(const struct netdev_switch_notifier_info *info)
+{
+       return info->dev;
+}
 
 #ifdef CONFIG_NET_SWITCHDEV
 
 int netdev_switch_parent_id_get(struct net_device *dev,
                                struct netdev_phys_item_id *psid);
 int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
-
+int register_netdev_switch_notifier(struct notifier_block *nb);
+int unregister_netdev_switch_notifier(struct notifier_block *nb);
+int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
+                                struct netdev_switch_notifier_info *info);
+int netdev_switch_port_bridge_setlink(struct net_device *dev,
+                               struct nlmsghdr *nlh, u16 flags);
+int netdev_switch_port_bridge_dellink(struct net_device *dev,
+                               struct nlmsghdr *nlh, u16 flags);
+int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
+                                              struct nlmsghdr *nlh, u16 flags);
+int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
+                                              struct nlmsghdr *nlh, u16 flags);
 #else
 
 static inline int netdev_switch_parent_id_get(struct net_device *dev,
@@ -32,6 +65,50 @@ static inline int netdev_switch_port_stp_update(struct net_device *dev,
        return -EOPNOTSUPP;
 }
 
+static inline int register_netdev_switch_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int unregister_netdev_switch_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
+                                              struct netdev_switch_notifier_info *info)
+{
+       return NOTIFY_DONE;
+}
+
+static inline int netdev_switch_port_bridge_setlink(struct net_device *dev,
+                                                   struct nlmsghdr *nlh,
+                                                   u16 flags)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int netdev_switch_port_bridge_dellink(struct net_device *dev,
+                                                   struct nlmsghdr *nlh,
+                                                   u16 flags)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
+                                                       struct nlmsghdr *nlh,
+                                                       u16 flags)
+{
+       return 0;
+}
+
+static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
+                                                       struct nlmsghdr *nlh,
+                                                       u16 flags)
+{
+       return 0;
+}
+
 #endif
 
 #endif /* _LINUX_SWITCHDEV_H_ */
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
new file mode 100644 (file)
index 0000000..86a070f
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __NET_TC_BPF_H
+#define __NET_TC_BPF_H
+
+#include <linux/filter.h>
+#include <net/act_api.h>
+
+struct tcf_bpf {
+       struct tcf_common       common;
+       struct bpf_prog         *filter;
+       struct sock_filter      *bpf_ops;
+       u16                     bpf_num_ops;
+};
+#define to_bpf(a) \
+       container_of(a->priv, struct tcf_bpf, common)
+
+#endif /* __NET_TC_BPF_H */
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
new file mode 100644 (file)
index 0000000..5c1104c
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __NET_TC_CONNMARK_H
+#define __NET_TC_CONNMARK_H
+
+#include <net/act_api.h>
+
+struct tcf_connmark_info {
+       struct tcf_common common;
+       u16 zone;
+};
+
+#define to_connmark(a) \
+       container_of(a->priv, struct tcf_connmark_info, common)
+
+#endif /* __NET_TC_CONNMARK_H */
index f50f29faf76f1fbcc5237de63c76f7c8200f4a6b..da4196fb78dbf39c12a421332bc0405948fc26c9 100644 (file)
@@ -274,6 +274,7 @@ extern int sysctl_tcp_challenge_ack_limit;
 extern unsigned int sysctl_tcp_notsent_lowat;
 extern int sysctl_tcp_min_tso_segs;
 extern int sysctl_tcp_autocorking;
+extern int sysctl_tcp_invalid_ratelimit;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
@@ -448,6 +449,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb);
+void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst);
@@ -636,6 +638,11 @@ static inline u32 tcp_rto_min_us(struct sock *sk)
        return jiffies_to_usecs(tcp_rto_min(sk));
 }
 
+static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
+{
+       return dst_metric_locked(dst, RTAX_CC_ALGO);
+}
+
 /* Compute the actual receive window we are currently advertising.
  * Rcv_nxt can be after the window if our peer push more data
  * than the offered window.
@@ -787,6 +794,8 @@ enum tcp_ca_ack_event_flags {
 #define TCP_CA_MAX     128
 #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
 
+#define TCP_CA_UNSPEC  0
+
 /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
 #define TCP_CONG_NON_RESTRICTED 0x1
 /* Requires ECN/ECT set on all packets */
@@ -794,7 +803,8 @@ enum tcp_ca_ack_event_flags {
 
 struct tcp_congestion_ops {
        struct list_head        list;
-       unsigned long flags;
+       u32 key;
+       u32 flags;
 
        /* initialize private data (optional) */
        void (*init)(struct sock *sk);
@@ -834,13 +844,24 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
 void tcp_get_allowed_congestion_control(char *buf, size_t len);
 int tcp_set_allowed_congestion_control(char *allowed);
 int tcp_set_congestion_control(struct sock *sk, const char *name);
-void tcp_slow_start(struct tcp_sock *tp, u32 acked);
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
 
 u32 tcp_reno_ssthresh(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
 
+struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
+u32 tcp_ca_get_key_by_name(const char *name);
+#ifdef CONFIG_INET
+char *tcp_ca_get_name_by_key(u32 key, char *buffer);
+#else
+static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
+{
+       return NULL;
+}
+#endif
+
 static inline bool tcp_ca_needs_ecn(const struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1124,6 +1145,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
        tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
+       tcp_rsk(req)->last_oow_ack_time = 0;
        req->mss = rx_opt->mss_clamp;
        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
        ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1216,6 +1238,37 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
        return true;
 }
 
+/* Return true if we're currently rate-limiting out-of-window ACKs and
+ * thus shouldn't send a dupack right now. We rate-limit dupacks in
+ * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+ * attacks that send repeated SYNs or ACKs for the same connection. To
+ * do this, we do not send a duplicate SYNACK or ACK if the remote
+ * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
+ */
+static inline bool tcp_oow_rate_limited(struct net *net,
+                                       const struct sk_buff *skb,
+                                       int mib_idx, u32 *last_oow_ack_time)
+{
+       /* Data packets without SYNs are not likely part of an ACK loop. */
+       if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+           !tcp_hdr(skb)->syn)
+               goto not_rate_limited;
+
+       if (*last_oow_ack_time) {
+               s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+               if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+                       NET_INC_STATS_BH(net, mib_idx);
+                       return true;    /* rate-limited: don't send yet! */
+               }
+       }
+
+       *last_oow_ack_time = tcp_time_stamp;
+
+not_rate_limited:
+       return false;   /* not rate-limited: go ahead, send dupack now! */
+}
+
 static inline void tcp_mib_init(struct net *net)
 {
        /* See RFC 2012 */
@@ -1693,4 +1746,19 @@ static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
        return dopt;
 }
 
+/* locally generated TCP pure ACKs have skb->truesize == 2
+ * (check tcp_send_ack() in net/ipv4/tcp_output.c )
+ * This is much faster than dissecting the packet to find out.
+ * (Think of GRE encapsulations, IPv4, IPv6, ...)
+ */
+static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
+{
+       return skb->truesize == 2;
+}
+
+static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
+{
+       skb->truesize = 2;
+}
+
 #endif /* _TCP_H */
index 2a50a70ef5870c76e0694ca460182671df46973e..1a20d33d56bc1ffb6d91826282ddabfc53d4fb25 100644 (file)
@@ -77,17 +77,17 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
                           struct udp_tunnel_sock_cfg *sock_cfg);
 
 /* Transmit the skb using UDP encapsulation. */
-int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
-                       struct sk_buff *skb, __be32 src, __be32 dst,
-                       __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
-                       __be16 dst_port, bool xnet);
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+                       __be32 src, __be32 dst, __u8 tos, __u8 ttl,
+                       __be16 df, __be16 src_port, __be16 dst_port,
+                       bool xnet, bool nocheck);
 
 #if IS_ENABLED(CONFIG_IPV6)
-int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
-                        struct sk_buff *skb, struct net_device *dev,
-                        struct in6_addr *saddr, struct in6_addr *daddr,
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+                        struct net_device *dev, struct in6_addr *saddr,
+                        struct in6_addr *daddr,
                         __u8 prio, __u8 ttl, __be16 src_port,
-                        __be16 dst_port);
+                        __be16 dst_port, bool nocheck);
 #endif
 
 void udp_tunnel_sock_release(struct socket *sock);
index ae7c8d1fbcadbccd635edc56a54dba6d94563170..80761938b9a78081822a4b82b4bd3fb30b5f6625 100644 (file)
@@ -20,8 +20,7 @@ static __inline__ int udplite_getfrag(void *from, char *to, int  offset,
                                      int len, int odd, struct sk_buff *skb)
 {
        struct msghdr *msg = from;
-       /* XXX: stripping const */
-       return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len);
+       return copy_from_iter(to, len, &msg->msg_iter) != len ? -EFAULT : 0;
 }
 
 /* Designate sk as UDP-Lite socket */
index 903461aa5644ce9d1e366818fa6cbda1ba0cc1b1..2927d6244481ae24092ba829f857e57fffcaacfd 100644 (file)
 #define VNI_HASH_BITS  10
 #define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
 
-/* VXLAN protocol header */
+/*
+ * VXLAN Group Based Policy Extension:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1|-|-|-|1|-|-|-|R|D|R|R|A|R|R|R|        Group Policy ID        |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                VXLAN Network Identifier (VNI) |   Reserved    |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * D = Don't Learn bit. When set, this bit indicates that the egress
+ *     VTEP MUST NOT learn the source address of the encapsulated frame.
+ *
+ * A = Indicates that the group policy has already been applied to
+ *     this packet. Policies MUST NOT be applied by devices when the
+ *     A bit is set.
+ *
+ * [0] https://tools.ietf.org/html/draft-smith-vxlan-group-policy
+ */
+struct vxlanhdr_gbp {
+       __u8    vx_flags;
+#ifdef __LITTLE_ENDIAN_BITFIELD
+       __u8    reserved_flags1:3,
+               policy_applied:1,
+               reserved_flags2:2,
+               dont_learn:1,
+               reserved_flags3:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+       __u8    reserved_flags1:1,
+               dont_learn:1,
+               reserved_flags2:2,
+               policy_applied:1,
+               reserved_flags3:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+       __be16  policy_id;
+       __be32  vx_vni;
+};
+
+#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | 0xFFFFFF)
+
+/* skb->mark mapping
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|R|R|R|R|R|D|R|R|A|R|R|R|        Group Policy ID        |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define VXLAN_GBP_DONT_LEARN           (BIT(6) << 16)
+#define VXLAN_GBP_POLICY_APPLIED       (BIT(3) << 16)
+#define VXLAN_GBP_ID_MASK              (0xFFFF)
+
+/* VXLAN protocol header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |G|R|R|R|I|R|R|C|               Reserved                        |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                VXLAN Network Identifier (VNI) |   Reserved    |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * G = 1       Group Policy (VXLAN-GBP)
+ * I = 1       VXLAN Network Identifier (VNI) present
+ * C = 1       Remote checksum offload (RCO)
+ */
 struct vxlanhdr {
        __be32 vx_flags;
        __be32 vx_vni;
 };
 
+/* VXLAN header flags. */
+#define VXLAN_HF_RCO BIT(24)
+#define VXLAN_HF_VNI BIT(27)
+#define VXLAN_HF_GBP BIT(31)
+
+/* Remote checksum offload header option */
+#define VXLAN_RCO_MASK  0x7f    /* Last byte of vni field */
+#define VXLAN_RCO_UDP   0x80    /* Indicate UDP RCO (TCP when not set *) */
+#define VXLAN_RCO_SHIFT 1       /* Left shift of start */
+#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
+#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT)
+
+#define VXLAN_N_VID     (1u << 24)
+#define VXLAN_VID_MASK  (VXLAN_N_VID - 1)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
+struct vxlan_metadata {
+       __be32          vni;
+       u32             gbp;
+};
+
 struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
+typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
+                          struct vxlan_metadata *md);
 
 /* per UDP socket information */
 struct vxlan_sock {
@@ -31,6 +113,7 @@ struct vxlan_sock {
        struct hlist_head vni_list[VNI_HASH_SIZE];
        atomic_t          refcnt;
        struct udp_offload udp_offloads;
+       u32               flags;
 };
 
 #define VXLAN_F_LEARN                  0x01
@@ -42,6 +125,16 @@ struct vxlan_sock {
 #define VXLAN_F_UDP_CSUM               0x40
 #define VXLAN_F_UDP_ZERO_CSUM6_TX      0x80
 #define VXLAN_F_UDP_ZERO_CSUM6_RX      0x100
+#define VXLAN_F_REMCSUM_TX             0x200
+#define VXLAN_F_REMCSUM_RX             0x400
+#define VXLAN_F_GBP                    0x800
+
+/* Flags that are used in the receive patch. These flags must match in
+ * order for a socket to be shareable
+ */
+#define VXLAN_F_RCV_FLAGS              (VXLAN_F_GBP |                  \
+                                        VXLAN_F_UDP_ZERO_CSUM6_RX |    \
+                                        VXLAN_F_REMCSUM_RX)
 
 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                                  vxlan_rcv_t *rcv, void *data,
@@ -49,10 +142,10 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 
 void vxlan_sock_release(struct vxlan_sock *vs);
 
-int vxlan_xmit_skb(struct vxlan_sock *vs,
-                  struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
+                  __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
+                  bool xnet, u32 vxflags);
 
 static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
                                                     netdev_features_t features)
index 1e7f74acc2eccb75b735b82c66d1612ce762c248..b429b73e875ea2725f87d10d21c318adab83e4d5 100644 (file)
@@ -857,7 +857,7 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the sample rate from the hw params
+ * params_rate - Get the sample rate from the hw params
  * @p: hw params
  */
 static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
@@ -866,7 +866,7 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the period size (in frames) from the hw params
+ * params_period_size - Get the period size (in frames) from the hw params
  * @p: hw params
  */
 static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
@@ -875,7 +875,7 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the number of periods from the hw params
+ * params_periods - Get the number of periods from the hw params
  * @p: hw params
  */
 static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
@@ -884,7 +884,7 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the buffer size (in frames) from the hw params
+ * params_buffer_size - Get the buffer size (in frames) from the hw params
  * @p: hw params
  */
 static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
@@ -893,7 +893,7 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the buffer size (in bytes) from the hw params
+ * params_buffer_bytes - Get the buffer size (in bytes) from the hw params
  * @p: hw params
  */
 static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
index 430cfaf92285f177d977d11717599bf1ff85b70b..db81c65b8f4857c011a025e5b54026bf7d683f7c 100644 (file)
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int);
 int    se_dev_set_emulate_rest_reord(struct se_device *dev, int);
 int    se_dev_set_queue_depth(struct se_device *, u32);
 int    se_dev_set_max_sectors(struct se_device *, u32);
-int    se_dev_set_fabric_max_sectors(struct se_device *, u32);
 int    se_dev_set_optimal_sectors(struct se_device *, u32);
 int    se_dev_set_block_size(struct se_device *, u32);
 
index 3247d7530107968aa7c1d1957f96790c226845dd..186f7a92357094fbb4735043b2a2b8c8cd884af8 100644 (file)
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name
        TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR);           \
        DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors);                 \
        TB_DEV_ATTR_RO(_backend, hw_max_sectors);                       \
-       DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors);                \
-       TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR);   \
        DEF_TB_DEV_ATTRIB(_backend, optimal_sectors);                   \
        TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR);      \
        DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth);                 \
index 397fb635766a96faa94c5b91788ad24fca0d2a34..4a8795a87b9e99f30ee07f43fdce3984ddc658e0 100644 (file)
@@ -77,8 +77,6 @@
 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
 /* Default max_write_same_len, disabled by default */
 #define DA_MAX_WRITE_SAME_LEN                  0
-/* Default max transfer length */
-#define DA_FABRIC_MAX_SECTORS                  8192
 /* Use a model alias based on the configfs backend device name */
 #define DA_EMULATE_MODEL_ALIAS                 0
 /* Emulation for Direct Page Out */
@@ -694,7 +692,6 @@ struct se_dev_attrib {
        u32             hw_block_size;
        u32             block_size;
        u32             hw_max_sectors;
-       u32             fabric_max_sectors;
        u32             optimal_sectors;
        u32             hw_queue_depth;
        u32             queue_depth;
index 6edf1f2028cdb0e0801af85183e02d0416fa73ed..86b399c66c3d6d3c5f12d1cd8749777b9d5c89f9 100644 (file)
@@ -146,6 +146,14 @@ TRACE_EVENT(kvm_msi_set_irq,
 
 #if defined(CONFIG_HAVE_KVM_IRQFD)
 
+#ifdef kvm_irqchips
+#define kvm_ack_irq_string "irqchip %s pin %u"
+#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
+#else
+#define kvm_ack_irq_string "irqchip %d pin %u"
+#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
+#endif
+
 TRACE_EVENT(kvm_ack_irq,
        TP_PROTO(unsigned int irqchip, unsigned int pin),
        TP_ARGS(irqchip, pin),
@@ -160,13 +168,7 @@ TRACE_EVENT(kvm_ack_irq,
                __entry->pin            = pin;
        ),
 
-#ifdef kvm_irqchips
-       TP_printk("irqchip %s pin %u",
-                 __print_symbolic(__entry->irqchip, kvm_irqchips),
-                __entry->pin)
-#else
-       TP_printk("irqchip %d pin %u", __entry->irqchip, __entry->pin)
-#endif
+       TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
 );
 
 #endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
index 1de256b358074b6b3486e4e1b4686f9ecace6320..49cc7c3de25221f2e014c80f12189bb1edc3b946 100644 (file)
@@ -40,9 +40,9 @@ TRACE_EVENT(net_dev_start_xmit,
                __assign_str(name, dev->name);
                __entry->queue_mapping = skb->queue_mapping;
                __entry->skbaddr = skb;
-               __entry->vlan_tagged = vlan_tx_tag_present(skb);
+               __entry->vlan_tagged = skb_vlan_tag_present(skb);
                __entry->vlan_proto = ntohs(skb->vlan_proto);
-               __entry->vlan_tci = vlan_tx_tag_get(skb);
+               __entry->vlan_tci = skb_vlan_tag_get(skb);
                __entry->protocol = ntohs(skb->protocol);
                __entry->ip_summed = skb->ip_summed;
                __entry->len = skb->len;
@@ -174,9 +174,9 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
 #endif
                __entry->queue_mapping = skb->queue_mapping;
                __entry->skbaddr = skb;
-               __entry->vlan_tagged = vlan_tx_tag_present(skb);
+               __entry->vlan_tagged = skb_vlan_tag_present(skb);
                __entry->vlan_proto = ntohs(skb->vlan_proto);
-               __entry->vlan_tci = vlan_tx_tag_get(skb);
+               __entry->vlan_tci = skb_vlan_tag_get(skb);
                __entry->protocol = ntohs(skb->protocol);
                __entry->ip_summed = skb->ip_summed;
                __entry->hash = skb->hash;
index 7543b3e51331fcb38574e3f309713b3a6a2d31c0..e063effe0cc18f0976f01b49cb56c3aeac47bf69 100644 (file)
@@ -5,7 +5,7 @@
 
 /*
  * FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x1000000
+ * FMODE_NONOTIFY is 0x4000000
  * These cannot be used by userspace O_* until internal and external open
  * flags are split.
  * -Eric Paris
index 00b100023c477bf80de661c386ceb27461f4bbd6..14b7b6e44c77ce279960d852936e9fbf37b231da 100644 (file)
@@ -283,6 +283,7 @@ header-y += net.h
 header-y += netlink_diag.h
 header-y += netlink.h
 header-y += netrom.h
+header-y += net_namespace.h
 header-y += net_tstamp.h
 header-y += nfc.h
 header-y += nfs2.h
index 3e4323a3918d7266efc46edb622636621f042afc..94ffe0c83ce72cf5e396a615fe039bec6fe468d4 100644 (file)
@@ -98,6 +98,7 @@ struct can_ctrlmode {
 #define CAN_CTRLMODE_BERR_REPORTING    0x10    /* Bus-error reporting */
 #define CAN_CTRLMODE_FD                        0x20    /* CAN FD mode */
 #define CAN_CTRLMODE_PRESUME_ACK       0x40    /* Ignore missing CAN ACKs */
+#define CAN_CTRLMODE_FD_NON_ISO                0x80    /* CAN FD in non-ISO mode */
 
 /*
  * CAN device statistics
index b03ee8f62d3c2a019924e4aabf02fdc462c75ffb..eaaea6208b424e7ef4fd361646b07fc497180a12 100644 (file)
@@ -125,6 +125,8 @@ enum {
 #define BRIDGE_VLAN_INFO_MASTER        (1<<0)  /* Operate on Bridge device as well */
 #define BRIDGE_VLAN_INFO_PVID  (1<<1)  /* VLAN is PVID, ingress untagged */
 #define BRIDGE_VLAN_INFO_UNTAGGED      (1<<2)  /* VLAN egresses untagged */
+#define BRIDGE_VLAN_INFO_RANGE_BEGIN   (1<<3) /* VLAN is start of vlan range */
+#define BRIDGE_VLAN_INFO_RANGE_END     (1<<4) /* VLAN is end of vlan range */
 
 struct bridge_vlan_info {
        __u16 flags;
index f7d0d2d7173aea9840ffe2a60d43021a9fed742a..0deee3eeddbf0e4a2b7af099b61c670381970cab 100644 (file)
@@ -146,6 +146,7 @@ enum {
        IFLA_PHYS_PORT_ID,
        IFLA_CARRIER_CHANGES,
        IFLA_PHYS_SWITCH_ID,
+       IFLA_LINK_NETNSID,
        __IFLA_MAX
 };
 
@@ -370,6 +371,9 @@ enum {
        IFLA_VXLAN_UDP_CSUM,
        IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
        IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
+       IFLA_VXLAN_REMCSUM_TX,
+       IFLA_VXLAN_REMCSUM_RX,
+       IFLA_VXLAN_GBP,
        __IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
index c33a65e3d62c85d104d38ab082d997df13cd4c0b..589ced069e8a1a68a9b1c9336517d66a675a75d2 100644 (file)
@@ -109,6 +109,7 @@ struct in_addr {
 
 #define IP_MINTTL       21
 #define IP_NODEFRAG     22
+#define IP_CHECKSUM    23
 
 /* IP_MTU_DISCOVER values */
 #define IP_PMTUDISC_DONT               0       /* Never send DF frames */
index e863d088b9a5a54cfd18d1e507c6c6e758ba5516..437a6a4b125a1383be23027c7130966d7c862bd5 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _UAPI_IPV6_H
 #define _UAPI_IPV6_H
 
+#include <linux/libc-compat.h>
 #include <linux/types.h>
 #include <linux/in6.h>
 #include <asm/byteorder.h>
  *     *under construction*
  */
 
-
+#if __UAPI_DEF_IN6_PKTINFO
 struct in6_pktinfo {
        struct in6_addr ipi6_addr;
        int             ipi6_ifindex;
 };
+#endif
 
+#if __UAPI_DEF_IP6_MTUINFO
 struct ip6_mtuinfo {
        struct sockaddr_in6     ip6m_addr;
        __u32                   ip6m_mtu;
 };
+#endif
 
 struct in6_ifreq {
        struct in6_addr ifr6_addr;
@@ -165,6 +169,7 @@ enum {
        DEVCONF_SUPPRESS_FRAG_NDISC,
        DEVCONF_ACCEPT_RA_FROM_LOCAL,
        DEVCONF_USE_OPTIMISTIC,
+       DEVCONF_ACCEPT_RA_MTU,
        DEVCONF_MAX
 };
 
index 7acef41fc2092abee6e977970ddb22df2952e8f9..af94f31e33ac9d8ccb5f68312feff34ea4eb6aa1 100644 (file)
@@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args {
        uint32_t pad;
 };
 
-#define KFD_IOC_MAGIC 'K'
+#define AMDKFD_IOCTL_BASE 'K'
+#define AMDKFD_IO(nr)                  _IO(AMDKFD_IOCTL_BASE, nr)
+#define AMDKFD_IOR(nr, type)           _IOR(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOW(nr, type)           _IOW(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOWR(nr, type)          _IOWR(AMDKFD_IOCTL_BASE, nr, type)
 
-#define KFD_IOC_GET_VERSION \
-               _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+#define AMDKFD_IOC_GET_VERSION                 \
+               AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
 
-#define KFD_IOC_CREATE_QUEUE \
-               _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+#define AMDKFD_IOC_CREATE_QUEUE                        \
+               AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
 
-#define KFD_IOC_DESTROY_QUEUE \
-       _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+#define AMDKFD_IOC_DESTROY_QUEUE               \
+               AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
 
-#define KFD_IOC_SET_MEMORY_POLICY \
-       _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+#define AMDKFD_IOC_SET_MEMORY_POLICY           \
+               AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
 
-#define KFD_IOC_GET_CLOCK_COUNTERS \
-       _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+#define AMDKFD_IOC_GET_CLOCK_COUNTERS          \
+               AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
 
-#define KFD_IOC_GET_PROCESS_APERTURES \
-       _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+#define AMDKFD_IOC_GET_PROCESS_APERTURES       \
+               AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
 
-#define KFD_IOC_UPDATE_QUEUE \
-       _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+#define AMDKFD_IOC_UPDATE_QUEUE                        \
+               AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
+
+#define AMDKFD_COMMAND_START           0x01
+#define AMDKFD_COMMAND_END             0x08
 
 #endif
index e28807ad17fa8dffb69879e1c0eaf23bf8971f88..fa673e9cc040aefcee4e96ee3e4bd6892c5be562 100644 (file)
@@ -70,6 +70,8 @@
 #define __UAPI_DEF_IPV6_MREQ           0
 #define __UAPI_DEF_IPPROTO_V6          0
 #define __UAPI_DEF_IPV6_OPTIONS                0
+#define __UAPI_DEF_IN6_PKTINFO         0
+#define __UAPI_DEF_IP6_MTUINFO         0
 
 #else
 
@@ -84,6 +86,8 @@
 #define __UAPI_DEF_IPV6_MREQ           1
 #define __UAPI_DEF_IPPROTO_V6          1
 #define __UAPI_DEF_IPV6_OPTIONS                1
+#define __UAPI_DEF_IN6_PKTINFO         1
+#define __UAPI_DEF_IP6_MTUINFO         1
 
 #endif /* _NETINET_IN_H */
 
 #define __UAPI_DEF_IPV6_MREQ           1
 #define __UAPI_DEF_IPPROTO_V6          1
 #define __UAPI_DEF_IPV6_OPTIONS                1
+#define __UAPI_DEF_IN6_PKTINFO         1
+#define __UAPI_DEF_IP6_MTUINFO         1
 
 /* Definitions for xattr.h */
 #define __UAPI_DEF_XATTR               1
index f3d77f9f1e0bb582b1f1ca02fd8bd99ff60729dd..3873a35509aad201f4d5ddb31346077a89641b78 100644 (file)
@@ -25,6 +25,7 @@ enum {
        NDA_VNI,
        NDA_IFINDEX,
        NDA_MASTER,
+       NDA_LINK_NETNSID,
        __NDA_MAX
 };
 
diff --git a/include/uapi/linux/net_namespace.h b/include/uapi/linux/net_namespace.h
new file mode 100644 (file)
index 0000000..778cd2c
--- /dev/null
@@ -0,0 +1,23 @@
+/* Copyright (c) 2015 6WIND S.A.
+ * Author: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+#ifndef _UAPI_LINUX_NET_NAMESPACE_H_
+#define _UAPI_LINUX_NET_NAMESPACE_H_
+
+/* Attributes of RTM_NEWNSID/RTM_GETNSID messages */
+enum {
+       NETNSA_NONE,
+#define NETNSA_NSID_NOT_ASSIGNED -1
+       NETNSA_NSID,
+       NETNSA_PID,
+       NETNSA_FD,
+       __NETNSA_MAX,
+};
+
+#define NETNSA_MAX             (__NETNSA_MAX - 1)
+
+#endif /* _UAPI_LINUX_NET_NAMESPACE_H_ */
index edbc888ceb51c1ffa66b11c1f58eea0343b80faf..6d1abea9746ea7b7d1128a0c1fd1a3ad5f45b96d 100644 (file)
@@ -24,8 +24,9 @@ enum {
        SOF_TIMESTAMPING_TX_SCHED = (1<<8),
        SOF_TIMESTAMPING_TX_ACK = (1<<9),
        SOF_TIMESTAMPING_OPT_CMSG = (1<<10),
+       SOF_TIMESTAMPING_OPT_TSONLY = (1<<11),
 
-       SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_CMSG,
+       SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TSONLY,
        SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
                                 SOF_TIMESTAMPING_LAST
 };
index 8119255feae4b44df8bac40415f4681a9ccf3c97..c1e2e63cf9b57873810f68056932589a9cde3ca2 100644 (file)
@@ -183,6 +183,7 @@ enum nfc_attrs {
        NFC_ATTR_SE_APDU,
        NFC_ATTR_TARGET_ISO15693_DSFID,
        NFC_ATTR_TARGET_ISO15693_UID,
+       NFC_ATTR_SE_PARAMS,
 /* private: internal use only */
        __NFC_ATTR_AFTER_LAST
 };
index f52797a90816e022f752dd5ccf7465e43cfeb41b..68b294e839447ab5a4bfeeaf0b2bebf6fe2ba427 100644 (file)
  *     %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME.
  *
  * @NL80211_CMD_GET_INTERFACE: Request an interface's configuration;
- *     either a dump request on a %NL80211_ATTR_WIPHY or a specific get
- *     on an %NL80211_ATTR_IFINDEX is supported.
+ *     either a dump request for all interfaces or a specific get with a
+ *     single %NL80211_ATTR_IFINDEX is supported.
  * @NL80211_CMD_SET_INTERFACE: Set type of a virtual interface, requires
  *     %NL80211_ATTR_IFINDEX and %NL80211_ATTR_IFTYPE.
  * @NL80211_CMD_NEW_INTERFACE: Newly created virtual interface or response
  *     if passed, define which channels should be scanned; if not
  *     passed, all channels allowed for the current regulatory domain
  *     are used.  Extra IEs can also be passed from the userspace by
- *     using the %NL80211_ATTR_IE attribute.
+ *     using the %NL80211_ATTR_IE attribute.  The first cycle of the
+ *     scheduled scan can be delayed by %NL80211_ATTR_SCHED_SCAN_DELAY
+ *     is supplied.
  * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT if
  *     scheduled scan is not running. The caller may assume that as soon
  *     as the call returns, it is safe to start a new scheduled scan again.
@@ -1735,6 +1737,9 @@ enum nl80211_commands {
  *     should be contained in the result as the sum of the respective counters
  *     over all channels.
  *
+ * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before a scheduled scan (or a
+ *     WoWLAN net-detect scan) is started, u32 in seconds.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2098,6 +2103,10 @@ enum nl80211_attrs {
 
        NL80211_ATTR_SURVEY_RADIO_STATS,
 
+       NL80211_ATTR_NETNS_FD,
+
+       NL80211_ATTR_SCHED_SCAN_DELAY,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -3741,11 +3750,12 @@ struct nl80211_pattern_support {
  * @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network
  *     is detected.  This is a nested attribute that contains the
  *     same attributes used with @NL80211_CMD_START_SCHED_SCAN.  It
- *     specifies how the scan is performed (e.g. the interval and the
- *     channels to scan) as well as the scan results that will
- *     trigger a wake (i.e. the matchsets).  This attribute is also
- *     sent in a response to @NL80211_CMD_GET_WIPHY, indicating the
- *     number of match sets supported by the driver (u32).
+ *     specifies how the scan is performed (e.g. the interval, the
+ *     channels to scan and the initial delay) as well as the scan
+ *     results that will trigger a wake (i.e. the matchsets).  This
+ *     attribute is also sent in a response to
+ *     @NL80211_CMD_GET_WIPHY, indicating the number of match sets
+ *     supported by the driver (u32).
  * @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute
  *     containing an array with information about what triggered the
  *     wake up.  If no elements are present in the array, it means
index 3a6dcaa359b768d09bfc58f71c0f9b23582d9f24..bbd49a0c46c7775c4bdeac171a33d721f77b287a 100644 (file)
@@ -174,6 +174,10 @@ enum ovs_packet_attr {
        OVS_PACKET_ATTR_USERDATA,    /* OVS_ACTION_ATTR_USERSPACE arg. */
        OVS_PACKET_ATTR_EGRESS_TUN_KEY,  /* Nested OVS_TUNNEL_KEY_ATTR_*
                                            attributes. */
+       OVS_PACKET_ATTR_UNUSED1,
+       OVS_PACKET_ATTR_UNUSED2,
+       OVS_PACKET_ATTR_PROBE,      /* Packet operation is a feature probe,
+                                      error logging should be suppressed. */
        __OVS_PACKET_ATTR_MAX
 };
 
@@ -248,11 +252,21 @@ enum ovs_vport_attr {
 
 #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
 
+enum {
+       OVS_VXLAN_EXT_UNSPEC,
+       OVS_VXLAN_EXT_GBP,      /* Flag or __u32 */
+       __OVS_VXLAN_EXT_MAX,
+};
+
+#define OVS_VXLAN_EXT_MAX (__OVS_VXLAN_EXT_MAX - 1)
+
+
 /* OVS_VPORT_ATTR_OPTIONS attributes for tunnels.
  */
 enum {
        OVS_TUNNEL_ATTR_UNSPEC,
        OVS_TUNNEL_ATTR_DST_PORT, /* 16-bit UDP port, used by L4 tunnels. */
+       OVS_TUNNEL_ATTR_EXTENSION,
        __OVS_TUNNEL_ATTR_MAX
 };
 
@@ -324,6 +338,7 @@ enum ovs_tunnel_key_attr {
        OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,        /* Array of Geneve options. */
        OVS_TUNNEL_KEY_ATTR_TP_SRC,             /* be16 src Transport Port. */
        OVS_TUNNEL_KEY_ATTR_TP_DST,             /* be16 dst Transport Port. */
+       OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS,         /* Nested OVS_VXLAN_EXT_* */
        __OVS_TUNNEL_KEY_ATTR_MAX
 };
 
@@ -444,6 +459,14 @@ struct ovs_key_nd {
  * a wildcarded match. Omitting attribute is treated as wildcarding all
  * corresponding fields. Optional for all requests. If not present,
  * all flow key bits are exact match bits.
+ * @OVS_FLOW_ATTR_UFID: A value between 1-16 octets specifying a unique
+ * identifier for the flow. Causes the flow to be indexed by this value rather
+ * than the value of the %OVS_FLOW_ATTR_KEY attribute. Optional for all
+ * requests. Present in notifications if the flow was created with this
+ * attribute.
+ * @OVS_FLOW_ATTR_UFID_FLAGS: A 32-bit value of OR'd %OVS_UFID_F_*
+ * flags that provide alternative semantics for flow installation and
+ * retrieval. Optional for all requests.
  *
  * These attributes follow the &struct ovs_header within the Generic Netlink
  * payload for %OVS_FLOW_* commands.
@@ -459,11 +482,23 @@ enum ovs_flow_attr {
        OVS_FLOW_ATTR_MASK,      /* Sequence of OVS_KEY_ATTR_* attributes. */
        OVS_FLOW_ATTR_PROBE,     /* Flow operation is a feature probe, error
                                  * logging should be suppressed. */
+       OVS_FLOW_ATTR_UFID,      /* Variable length unique flow identifier. */
+       OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
        __OVS_FLOW_ATTR_MAX
 };
 
 #define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1)
 
+/**
+ * Omit attributes for notifications.
+ *
+ * If a datapath request contains an %OVS_UFID_F_OMIT_* flag, then the datapath
+ * may omit the corresponding %OVS_FLOW_ATTR_* from the response.
+ */
+#define OVS_UFID_F_OMIT_KEY      (1 << 0)
+#define OVS_UFID_F_OMIT_MASK     (1 << 1)
+#define OVS_UFID_F_OMIT_ACTIONS  (1 << 2)
+
 /**
  * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action.
  * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with
@@ -564,6 +599,12 @@ struct ovs_action_hash {
  * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header.  The
  * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its
  * value.
+ * @OVS_ACTION_ATTR_SET_MASKED: Replaces the contents of an existing header.  A
+ * nested %OVS_KEY_ATTR_* attribute specifies a header to modify, its value,
+ * and a mask.  For every bit set in the mask, the corresponding bit value
+ * is copied from the value to the packet header field, rest of the bits are
+ * left unchanged.  The non-masked value bits must be passed in as zeroes.
+ * Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute.
  * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
  * packet.
  * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
@@ -582,6 +623,9 @@ struct ovs_action_hash {
  * Only a single header can be set with a single %OVS_ACTION_ATTR_SET.  Not all
  * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
  * type may not be changed.
+ *
+ * @OVS_ACTION_ATTR_SET_TO_MASKED: Kernel internal masked set action translated
+ * from the @OVS_ACTION_ATTR_SET.
  */
 
 enum ovs_action_attr {
@@ -596,8 +640,19 @@ enum ovs_action_attr {
        OVS_ACTION_ATTR_HASH,         /* struct ovs_action_hash. */
        OVS_ACTION_ATTR_PUSH_MPLS,    /* struct ovs_action_push_mpls. */
        OVS_ACTION_ATTR_POP_MPLS,     /* __be16 ethertype. */
+       OVS_ACTION_ATTR_SET_MASKED,   /* One nested OVS_KEY_ATTR_* including
+                                      * data immediately followed by a mask.
+                                      * The data must be zero for the unmasked
+                                      * bits. */
 
-       __OVS_ACTION_ATTR_MAX
+       __OVS_ACTION_ATTR_MAX,        /* Nothing past this will be accepted
+                                      * from userspace. */
+
+#ifdef __KERNEL__
+       OVS_ACTION_ATTR_SET_TO_MASKED, /* Kernel module internal masked
+                                       * set action converted from
+                                       * OVS_ACTION_ATTR_SET. */
+#endif
 };
 
 #define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1)
index d62316baae942c43b2558ed2768c88950516126c..534b847107453019d362e9f9f9c0969fc3100c8b 100644 (file)
@@ -774,6 +774,8 @@ enum {
 
        TCA_FQ_FLOW_REFILL_DELAY,       /* flow credit refill delay in usec */
 
+       TCA_FQ_ORPHAN_MASK,     /* mask applied to orphaned skb hashes */
+
        __TCA_FQ_MAX
 };
 
index 9c9b8b4480cd4608d783318b7dbac4c060db210a..5cc5d66bf519f65cb4b29e041c3b06fdbe01c889 100644 (file)
@@ -132,6 +132,11 @@ enum {
        RTM_GETMDB = 86,
 #define RTM_GETMDB RTM_GETMDB
 
+       RTM_NEWNSID = 88,
+#define RTM_NEWNSID RTM_NEWNSID
+       RTM_GETNSID = 90,
+#define RTM_GETNSID RTM_GETNSID
+
        __RTM_MAX,
 #define RTM_MAX                (((__RTM_MAX + 3) & ~3) - 1)
 };
@@ -389,6 +394,8 @@ enum {
 #define RTAX_INITRWND RTAX_INITRWND
        RTAX_QUICKACK,
 #define RTAX_QUICKACK RTAX_QUICKACK
+       RTAX_CC_ALGO,
+#define RTAX_CC_ALGO RTAX_CC_ALGO
        __RTAX_MAX
 };
 
@@ -634,6 +641,7 @@ struct tcamsg {
 /* New extended info filters for IFLA_EXT_MASK */
 #define RTEXT_FILTER_VF                (1 << 0)
 #define RTEXT_FILTER_BRVLAN    (1 << 1)
+#define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2)
 
 /* End of information exported to user level */
 
index b22224100011b9dc3b620c0c930344414889afb8..6a6fb747c78db0bfb763212c826832f206107a0d 100644 (file)
@@ -270,6 +270,12 @@ enum
        LINUX_MIB_TCPHYSTARTTRAINCWND,          /* TCPHystartTrainCwnd */
        LINUX_MIB_TCPHYSTARTDELAYDETECT,        /* TCPHystartDelayDetect */
        LINUX_MIB_TCPHYSTARTDELAYCWND,          /* TCPHystartDelayCwnd */
+       LINUX_MIB_TCPACKSKIPPEDSYNRECV,         /* TCPACKSkippedSynRecv */
+       LINUX_MIB_TCPACKSKIPPEDPAWS,            /* TCPACKSkippedPAWS */
+       LINUX_MIB_TCPACKSKIPPEDSEQ,             /* TCPACKSkippedSeq */
+       LINUX_MIB_TCPACKSKIPPEDFINWAIT2,        /* TCPACKSkippedFinWait2 */
+       LINUX_MIB_TCPACKSKIPPEDTIMEWAIT,        /* TCPACKSkippedTimeWait */
+       LINUX_MIB_TCPACKSKIPPEDCHALLENGE,       /* TCPACKSkippedChallenge */
        __LINUX_MIB_MAX
 };
 
index b057da2b87a40a788346b09e46aaed27580f16c0..19d5219b0b991eda86a5bb8a0274d35a5a88ce17 100644 (file)
@@ -8,3 +8,4 @@ header-y += tc_nat.h
 header-y += tc_pedit.h
 header-y += tc_skbedit.h
 header-y += tc_vlan.h
+header-y += tc_bpf.h
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
new file mode 100644 (file)
index 0000000..5288bd7
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_BPF_H
+#define __LINUX_TC_BPF_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_BPF 13
+
+struct tc_act_bpf {
+       tc_gen;
+};
+
+enum {
+       TCA_ACT_BPF_UNSPEC,
+       TCA_ACT_BPF_TM,
+       TCA_ACT_BPF_PARMS,
+       TCA_ACT_BPF_OPS_LEN,
+       TCA_ACT_BPF_OPS,
+       __TCA_ACT_BPF_MAX,
+};
+#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_connmark.h b/include/uapi/linux/tc_act/tc_connmark.h
new file mode 100644 (file)
index 0000000..994b097
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef __UAPI_TC_CONNMARK_H
+#define __UAPI_TC_CONNMARK_H
+
+#include <linux/types.h>
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_CONNMARK 14
+
+struct tc_connmark {
+       tc_gen;
+       __u16 zone;
+};
+
+enum {
+       TCA_CONNMARK_UNSPEC,
+       TCA_CONNMARK_PARMS,
+       TCA_CONNMARK_TM,
+       __TCA_CONNMARK_MAX
+};
+#define TCA_CONNMARK_MAX (__TCA_CONNMARK_MAX - 1)
+
+#endif
index baeab83deb6469c5abb22ac61ee07f73b0dfedfe..013c9d8db3720cc1c606323def3e47692702cfa2 100644 (file)
@@ -82,7 +82,7 @@ struct uinput_ff_erase {
  * The complete sysfs path is then /sys/devices/virtual/input/--NAME--
  * Usually, it is in the form "inputN"
  */
-#define UI_GET_SYSNAME(len)    _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 300, len)
+#define UI_GET_SYSNAME(len)    _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 44, len)
 
 /**
  * UI_GET_VERSION - Return version of uinput protocol
@@ -91,7 +91,7 @@ struct uinput_ff_erase {
  * the integer pointed to by the ioctl argument. The protocol version
  * is hard-coded in the kernel and is independent of the uinput device.
  */
-#define UI_GET_VERSION         _IOR(UINPUT_IOCTL_BASE, 301, unsigned int)
+#define UI_GET_VERSION         _IOR(UINPUT_IOCTL_BASE, 45, unsigned int)
 
 /*
  * To write a force-feedback-capable driver, the upload_effect
index 61c818a7fe70dfca4d26b2ef7f4269e9da22233f..a3318f31e8e7fd05f317c595e8c4ce3bc3595fac 100644 (file)
@@ -101,6 +101,13 @@ struct vring {
        struct vring_used *used;
 };
 
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
 /* The standard layout for the ring is a continuous chunk of memory which looks
  * like this.  We assume num is a power of 2.
  *
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
new file mode 100644 (file)
index 0000000..b47d9d0
--- /dev/null
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * nmi.h
+ *
+ * NMI callback registration and reason codes.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_NMI_H__
+#define __XEN_PUBLIC_NMI_H__
+
+#include <xen/interface/xen.h>
+
+/*
+ * NMI reason codes:
+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
+ */
+ /* I/O-check error reported via ISA port 0x61, bit 6. */
+#define _XEN_NMIREASON_io_error     0
+#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
+ /* PCI SERR reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_pci_serr     1
+#define XEN_NMIREASON_pci_serr      (1UL << _XEN_NMIREASON_pci_serr)
+ /* Unknown hardware-generated NMI. */
+#define _XEN_NMIREASON_unknown      2
+#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)
+
+/*
+ * long nmi_op(unsigned int cmd, void *arg)
+ * NB. All ops return zero on success, else a negative error code.
+ */
+
+/*
+ * Register NMI callback for this (calling) VCPU. Currently this only makes
+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
+ * arg == pointer to xennmi_callback structure.
+ */
+#define XENNMI_register_callback   0
+struct xennmi_callback {
+    unsigned long handler_address;
+    unsigned long pad;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);
+
+/*
+ * Deregister NMI callback for this (calling) VCPU.
+ * arg == NULL.
+ */
+#define XENNMI_unregister_callback 1
+
+#endif /* __XEN_PUBLIC_NMI_H__ */
index 12765b6f9517ee6169d73ba667fb86d4ce57a016..c5ed20bb3fe96d5f309c5299a55a7ba3b7a140bd 100644 (file)
@@ -3,6 +3,11 @@
 
 #include <asm/xen/page.h>
 
+static inline unsigned long page_to_mfn(struct page *page)
+{
+       return pfn_to_mfn(page_to_pfn(page));
+}
+
 struct xen_memory_region {
        phys_addr_t start;
        phys_addr_t size;
index 37c69ab561dad881c9fd76eaac1ed5b48e5bab52..072566dd0caf7739fc42b7d59c6791c29dc89343 100644 (file)
@@ -72,6 +72,8 @@
 #include <linux/fs_struct.h>
 #include <linux/compat.h>
 #include <linux/ctype.h>
+#include <linux/string.h>
+#include <uapi/linux/limits.h>
 
 #include "audit.h"
 
@@ -1861,8 +1863,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
        }
 
        list_for_each_entry_reverse(n, &context->names_list, list) {
-               /* does the name pointer match? */
-               if (!n->name || n->name->name != name->name)
+               if (!n->name || strcmp(n->name->name, name->name))
                        continue;
 
                /* match the correct record type */
@@ -1881,14 +1882,44 @@ out_alloc:
        n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
        if (!n)
                return;
-       if (name)
-               /* since name is not NULL we know there is already a matching
-                * name record, see audit_getname(), so there must be a type
-                * mismatch; reuse the string path since the original name
-                * record will keep the string valid until we free it in
-                * audit_free_names() */
-               n->name = name;
+       /* unfortunately, while we may have a path name to record with the
+        * inode, we can't always rely on the string lasting until the end of
+        * the syscall so we need to create our own copy, it may fail due to
+        * memory allocation issues, but we do our best */
+       if (name) {
+               /* we can't use getname_kernel() due to size limits */
+               size_t len = strlen(name->name) + 1;
+               struct filename *new = __getname();
+
+               if (unlikely(!new))
+                       goto out;
+
+               if (len <= (PATH_MAX - sizeof(*new))) {
+                       new->name = (char *)(new) + sizeof(*new);
+                       new->separate = false;
+               } else if (len <= PATH_MAX) {
+                       /* this looks odd, but is due to final_putname() */
+                       struct filename *new2;
 
+                       new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
+                       if (unlikely(!new2)) {
+                               __putname(new);
+                               goto out;
+                       }
+                       new2->name = (char *)new;
+                       new2->separate = true;
+                       new = new2;
+               } else {
+                       /* we should never get here, but let's be safe */
+                       __putname(new);
+                       goto out;
+               }
+               strlcpy((char *)new->name, name->name, len);
+               new->uptr = NULL;
+               new->aname = n;
+               n->name = new;
+               n->name_put = true;
+       }
 out:
        if (parent) {
                n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
index d6594e457a25af32e41a4b46a00a43ddf0a2c46b..a64e7a207d2b5cd123b65f7143c6d659c0aed726 100644 (file)
@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
-       module_free(NULL, hdr);
+       module_memfree(hdr);
 }
 #endif /* CONFIG_BPF_JIT */
 
index 088ac0b1b106ff772e9122529866eb5406effd14..536edc2be3072e91ab132555fc4f9bc3ce656604 100644 (file)
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
        int ufd = attr->map_fd;
        struct fd f = fdget(ufd);
        struct bpf_map *map;
-       void *key, *value;
+       void *key, *value, *ptr;
        int err;
 
        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
 
-       err = -ENOENT;
-       rcu_read_lock();
-       value = map->ops->map_lookup_elem(map, key);
+       err = -ENOMEM;
+       value = kmalloc(map->value_size, GFP_USER);
        if (!value)
-               goto err_unlock;
+               goto free_key;
+
+       rcu_read_lock();
+       ptr = map->ops->map_lookup_elem(map, key);
+       if (ptr)
+               memcpy(value, ptr, map->value_size);
+       rcu_read_unlock();
+
+       err = -ENOENT;
+       if (!ptr)
+               goto free_value;
 
        err = -EFAULT;
        if (copy_to_user(uvalue, value, map->value_size) != 0)
-               goto err_unlock;
+               goto free_value;
 
        err = 0;
 
-err_unlock:
-       rcu_read_unlock();
+free_value:
+       kfree(value);
 free_key:
        kfree(key);
 err_put:
index bb263d0caab323810f2c30fc3799be467859a7ac..04cfe8ace52088a4c5ed092c389d08fd99117b19 100644 (file)
@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)
         *
         * And don't kill the default root.
         */
-       if (css_has_online_children(&root->cgrp.self) ||
+       if (!list_empty(&root->cgrp.self.children) ||
            root == &cgrp_dfl_root)
                cgroup_put(&root->cgrp);
        else
index 1adf62b39b96b496e56ca8484c6939429981277c..07ce18ca71e0cd46b70155269a77b04af23f6526 100644 (file)
@@ -27,6 +27,9 @@
  * version 2. This program is licensed "as is" without any warranty of any
  * kind, whether express or implied.
  */
+
+#define pr_fmt(fmt) "KGDB: " fmt
+
 #include <linux/pid_namespace.h>
 #include <linux/clocksource.h>
 #include <linux/serial_core.h>
@@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr)
                return err;
        err = kgdb_arch_remove_breakpoint(&tmp);
        if (err)
-               printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
-                  "memory destroyed at: %lx", addr);
+               pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
+                      addr);
        return err;
 }
 
@@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void)
                error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
                if (error) {
                        ret = error;
-                       printk(KERN_INFO "KGDB: BP install failed: %lx",
-                              kgdb_break[i].bpt_addr);
+                       pr_info("BP install failed: %lx\n",
+                               kgdb_break[i].bpt_addr);
                        continue;
                }
 
@@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void)
                        continue;
                error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error) {
-                       printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
-                              kgdb_break[i].bpt_addr);
+                       pr_info("BP remove failed: %lx\n",
+                               kgdb_break[i].bpt_addr);
                        ret = error;
                }
 
@@ -367,7 +370,7 @@ int dbg_remove_all_break(void)
                        goto setundefined;
                error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error)
-                       printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+                       pr_err("breakpoint remove failed: %lx\n",
                               kgdb_break[i].bpt_addr);
 setundefined:
                kgdb_break[i].state = BP_UNDEFINED;
@@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait)
        if (print_wait) {
 #ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
-                       printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+                       pr_crit("waiting... or $3#33 for KDB\n");
 #else
-               printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+               pr_crit("Waiting for remote debugger\n");
 #endif
        }
        return 1;
@@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
                exception_level = 0;
                kgdb_skipexception(ks->ex_vector, ks->linux_regs);
                dbg_activate_sw_breakpoints();
-               printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
-                       addr);
+               pr_crit("re-enter error: breakpoint removed %lx\n", addr);
                WARN_ON_ONCE(1);
 
                return 1;
@@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
                panic("Recursive entry to debugger");
        }
 
-       printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+       pr_crit("re-enter exception: ALL breakpoints killed\n");
 #ifdef CONFIG_KGDB_KDB
        /* Allow kdb to debug itself one level */
        return 0;
@@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
        int cpu;
        int trace_on = 0;
        int online_cpus = num_online_cpus();
+       u64 time_left;
 
        kgdb_info[ks->cpu].enter_kgdb++;
        kgdb_info[ks->cpu].exception_state |= exception_state;
@@ -595,9 +598,13 @@ return_normal:
        /*
         * Wait for the other CPUs to be notified and be waiting for us:
         */
-       while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
-                               atomic_read(&slaves_in_kgdb)) != online_cpus)
+       time_left = loops_per_jiffy * HZ;
+       while (kgdb_do_roundup && --time_left &&
+              (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
+                  online_cpus)
                cpu_relax();
+       if (!time_left)
+               pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
 
        /*
         * At this point the primary processor is completely
@@ -795,15 +802,15 @@ static struct console kgdbcons = {
 static void sysrq_handle_dbg(int key)
 {
        if (!dbg_io_ops) {
-               printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+               pr_crit("ERROR: No KGDB I/O module available\n");
                return;
        }
        if (!kgdb_connected) {
 #ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
-                       printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+                       pr_crit("KGDB or $3#33 for KDB\n");
 #else
-               printk(KERN_CRIT "Entering KGDB\n");
+               pr_crit("Entering KGDB\n");
 #endif
        }
 
@@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void)
 {
        kgdb_break_asap = 0;
 
-       printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+       pr_crit("Waiting for connection from remote gdb...\n");
        kgdb_breakpoint();
 }
 
@@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
        if (dbg_io_ops) {
                spin_unlock(&kgdb_registration_lock);
 
-               printk(KERN_ERR "kgdb: Another I/O driver is already "
-                               "registered with KGDB.\n");
+               pr_err("Another I/O driver is already registered with KGDB\n");
                return -EBUSY;
        }
 
@@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
 
        spin_unlock(&kgdb_registration_lock);
 
-       printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
-              new_dbg_io_ops->name);
+       pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
 
        /* Arm KGDB now. */
        kgdb_register_callbacks();
@@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
 
        spin_unlock(&kgdb_registration_lock);
 
-       printk(KERN_INFO
-               "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+       pr_info("Unregistered I/O driver %s, debugger disabled\n",
                old_dbg_io_ops->name);
 }
 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
index b20d544f20c2a12c24ac492c4dae56961e0b959d..e1dbf4a2c69e4ca9721c22184cb9f800325b9194 100644 (file)
@@ -531,22 +531,29 @@ void __init kdb_initbptab(void)
        for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
                bp->bp_free = 1;
 
-       kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
-               "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
-               "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("bp", kdb_bp, "[<vaddr>]",
+               "Set/Display breakpoints", 0,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("bl", kdb_bp, "[<vaddr>]",
+               "Display breakpoints", 0,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
        if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
-               kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
-               "[datar [length]|dataw [length]]   Set hw brk", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("bc", kdb_bc, "<bpnum>",
-               "Clear Breakpoint", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("be", kdb_bc, "<bpnum>",
-               "Enable Breakpoint", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bd", kdb_bc, "<bpnum>",
-               "Disable Breakpoint", 0, KDB_REPEAT_NONE);
-
-       kdb_register_repeat("ss", kdb_ss, "",
-               "Single Step", 1, KDB_REPEAT_NO_ARGS);
+               kdb_register_flags("bph", kdb_bp, "[<vaddr>]",
+               "[datar [length]|dataw [length]]   Set hw brk", 0,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("bc", kdb_bc, "<bpnum>",
+               "Clear Breakpoint", 0,
+               KDB_ENABLE_FLOW_CTRL);
+       kdb_register_flags("be", kdb_bc, "<bpnum>",
+               "Enable Breakpoint", 0,
+               KDB_ENABLE_FLOW_CTRL);
+       kdb_register_flags("bd", kdb_bc, "<bpnum>",
+               "Disable Breakpoint", 0,
+               KDB_ENABLE_FLOW_CTRL);
+
+       kdb_register_flags("ss", kdb_ss, "",
+               "Single Step", 1,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
        /*
         * Architecture dependent initialization.
         */
index 8859ca34dcfe0a58dbd530b8668d8d88760a3eb4..15e1a7af5dd033f130ef2b4ed96cf1e2809442f5 100644 (file)
@@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks)
                ks->pass_exception = 1;
                KDB_FLAG_SET(CATASTROPHIC);
        }
+       /* set CATASTROPHIC if the system contains unresponsive processors */
+       for_each_online_cpu(i)
+               if (!kgdb_info[i].enter_kgdb)
+                       KDB_FLAG_SET(CATASTROPHIC);
        if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
                KDB_STATE_CLEAR(SSBPT);
                KDB_STATE_CLEAR(DOING_SS);
index 379650b984f8150bd7ead11fa57767261ce21758..7b40c5f07dce8d09e1ebaba547e401b5655befbb 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/ctype.h>
+#include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/kmsg_dump.h>
@@ -23,6 +24,7 @@
 #include <linux/vmalloc.h>
 #include <linux/atomic.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/slab.h>
 #include "kdb_private.h"
 
+#undef MODULE_PARAM_PREFIX
+#define        MODULE_PARAM_PREFIX "kdb."
+
+static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
+module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
+
 #define GREP_LEN 256
 char kdb_grep_string[GREP_LEN];
 int kdb_grepping_flag;
@@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = {
        KDBMSG(BADLENGTH, "Invalid length field"),
        KDBMSG(NOBP, "No Breakpoint exists"),
        KDBMSG(BADADDR, "Invalid address"),
+       KDBMSG(NOPERM, "Permission denied"),
 };
 #undef KDBMSG
 
@@ -187,6 +196,26 @@ struct task_struct *kdb_curr_task(int cpu)
        return p;
 }
 
+/*
+ * Check whether the flags of the current command and the permissions
+ * of the kdb console has allow a command to be run.
+ */
+static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+                                  bool no_args)
+{
+       /* permissions comes from userspace so needs massaging slightly */
+       permissions &= KDB_ENABLE_MASK;
+       permissions |= KDB_ENABLE_ALWAYS_SAFE;
+
+       /* some commands change group when launched with no arguments */
+       if (no_args)
+               permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
+
+       flags |= KDB_ENABLE_ALL;
+
+       return permissions & flags;
+}
+
 /*
  * kdbgetenv - This function will return the character string value of
  *     an environment variable.
@@ -475,6 +504,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
        char *cp;
        kdb_symtab_t symtab;
 
+       /*
+        * If the enable flags prohibit both arbitrary memory access
+        * and flow control then there are no reasonable grounds to
+        * provide symbol lookup.
+        */
+       if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL,
+                            kdb_cmd_enabled, false))
+               return KDB_NOPERM;
+
        /*
         * Process arguments which follow the following syntax:
         *
@@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
                if (!s->count)
                        s->usable = 0;
                if (s->usable)
-                       kdb_register(s->name, kdb_exec_defcmd,
-                                    s->usage, s->help, 0);
+                       /* macros are always safe because when executed each
+                        * internal command re-enters kdb_parse() and is
+                        * safety checked individually.
+                        */
+                       kdb_register_flags(s->name, kdb_exec_defcmd, s->usage,
+                                          s->help, 0,
+                                          KDB_ENABLE_ALWAYS_SAFE);
                return 0;
        }
        if (!s->usable)
@@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr)
 
        if (i < kdb_max_commands) {
                int result;
+
+               if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1))
+                       return KDB_NOPERM;
+
                KDB_STATE_SET(CMD);
                result = (*tp->cmd_func)(argc-1, (const char **)argv);
                if (result && ignore_errors && result > KDB_CMD_GO)
                        result = 0;
                KDB_STATE_CLEAR(CMD);
-               switch (tp->cmd_repeat) {
-               case KDB_REPEAT_NONE:
-                       argc = 0;
-                       if (argv[0])
-                               *(argv[0]) = '\0';
-                       break;
-               case KDB_REPEAT_NO_ARGS:
-                       argc = 1;
-                       if (argv[1])
-                               *(argv[1]) = '\0';
-                       break;
-               case KDB_REPEAT_WITH_ARGS:
-                       break;
-               }
+
+               if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS)
+                       return result;
+
+               argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0;
+               if (argv[argc])
+                       *(argv[argc]) = '\0';
                return result;
        }
 
@@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv)
  */
 static int kdb_sr(int argc, const char **argv)
 {
+       bool check_mask =
+           !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false);
+
        if (argc != 1)
                return KDB_ARGCOUNT;
+
        kdb_trap_printk++;
-       __handle_sysrq(*argv[1], false);
+       __handle_sysrq(*argv[1], check_mask);
        kdb_trap_printk--;
 
        return 0;
@@ -1979,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
                kdb_printf("%-20s%8u  0x%p ", mod->name,
                           mod->core_size, (void *)mod);
 #ifdef CONFIG_MODULE_UNLOAD
-               kdb_printf("%4ld ", module_refcount(mod));
+               kdb_printf("%4d ", module_refcount(mod));
 #endif
                if (mod->state == MODULE_STATE_GOING)
                        kdb_printf(" (Unloading)");
@@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void)
        for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
                if (!cpu_online(i)) {
                        state = 'F';    /* cpu is offline */
+               } else if (!kgdb_info[i].enter_kgdb) {
+                       state = 'D';    /* cpu is online but unresponsive */
                } else {
                        state = ' ';    /* cpu is responding to kdb */
                        if (kdb_task_state_char(KDB_TSK(i)) == 'I')
@@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
        /*
         * Validate cpunum
         */
-       if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+       if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
                return KDB_BADCPUNUM;
 
        dbg_switch_cpu = cpunum;
@@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv)
                        return 0;
                if (!kt->cmd_name)
                        continue;
+               if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true))
+                       continue;
                if (strlen(kt->cmd_usage) > 20)
                        space = "\n                                    ";
                kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
@@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv)
 }
 
 /*
- * kdb_register_repeat - This function is used to register a kernel
+ * kdb_register_flags - This function is used to register a kernel
  *     debugger command.
  * Inputs:
  *     cmd     Command name
@@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv)
  *     zero for success, one if a duplicate command.
  */
 #define kdb_command_extend 50  /* arbitrary */
-int kdb_register_repeat(char *cmd,
-                       kdb_func_t func,
-                       char *usage,
-                       char *help,
-                       short minlen,
-                       kdb_repeat_t repeat)
+int kdb_register_flags(char *cmd,
+                      kdb_func_t func,
+                      char *usage,
+                      char *help,
+                      short minlen,
+                      kdb_cmdflags_t flags)
 {
        int i;
        kdbtab_t *kp;
@@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd,
        kp->cmd_func   = func;
        kp->cmd_usage  = usage;
        kp->cmd_help   = help;
-       kp->cmd_flags  = 0;
        kp->cmd_minlen = minlen;
-       kp->cmd_repeat = repeat;
+       kp->cmd_flags  = flags;
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(kdb_register_repeat);
+EXPORT_SYMBOL_GPL(kdb_register_flags);
 
 
 /*
  * kdb_register - Compatibility register function for commands that do
  *     not need to specify a repeat state.  Equivalent to
- *     kdb_register_repeat with KDB_REPEAT_NONE.
+ *     kdb_register_flags with flags set to 0.
  * Inputs:
  *     cmd     Command name
  *     func    Function to execute the command
@@ -2721,8 +2768,7 @@ int kdb_register(char *cmd,
             char *help,
             short minlen)
 {
-       return kdb_register_repeat(cmd, func, usage, help, minlen,
-                                  KDB_REPEAT_NONE);
+       return kdb_register_flags(cmd, func, usage, help, minlen, 0);
 }
 EXPORT_SYMBOL_GPL(kdb_register);
 
@@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void)
        for_each_kdbcmd(kp, i)
                kp->cmd_name = NULL;
 
-       kdb_register_repeat("md", kdb_md, "<vaddr>",
+       kdb_register_flags("md", kdb_md, "<vaddr>",
          "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
-                           KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
-         "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
-         "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mds", kdb_md, "<vaddr>",
-         "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
-         "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("go", kdb_go, "[<vaddr>]",
-         "Continue Execution", 1, KDB_REPEAT_NONE);
-       kdb_register_repeat("rd", kdb_rd, "",
-         "Display Registers", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
-         "Modify Registers", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("ef", kdb_ef, "<vaddr>",
-         "Display exception frame", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
-         "Stack traceback", 1, KDB_REPEAT_NONE);
-       kdb_register_repeat("btp", kdb_bt, "<pid>",
-         "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
-         "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("btc", kdb_bt, "",
-         "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>",
+         "Display Raw Memory", 0,
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>",
+         "Display Physical Memory", 0,
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mds", kdb_md, "<vaddr>",
+         "Display Memory Symbolically", 0,
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>",
+         "Modify Memory Contents", 0,
+         KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("go", kdb_go, "[<vaddr>]",
+         "Continue Execution", 1,
+         KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+       kdb_register_flags("rd", kdb_rd, "",
+         "Display Registers", 0,
+         KDB_ENABLE_REG_READ);
+       kdb_register_flags("rm", kdb_rm, "<reg> <contents>",
+         "Modify Registers", 0,
+         KDB_ENABLE_REG_WRITE);
+       kdb_register_flags("ef", kdb_ef, "<vaddr>",
+         "Display exception frame", 0,
+         KDB_ENABLE_MEM_READ);
+       kdb_register_flags("bt", kdb_bt, "[<vaddr>]",
+         "Stack traceback", 1,
+         KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+       kdb_register_flags("btp", kdb_bt, "<pid>",
+         "Display stack for process <pid>", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+         "Backtrace all processes matching state flag", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("btc", kdb_bt, "",
+         "Backtrace current process on each cpu", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("btt", kdb_bt, "<vaddr>",
          "Backtrace process given its struct task address", 0,
-                           KDB_REPEAT_NONE);
-       kdb_register_repeat("env", kdb_env, "",
-         "Show environment variables", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("set", kdb_set, "",
-         "Set environment variables", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("help", kdb_help, "",
-         "Display Help Message", 1, KDB_REPEAT_NONE);
-       kdb_register_repeat("?", kdb_help, "",
-         "Display Help Message", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
-         "Switch to new cpu", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("kgdb", kdb_kgdb, "",
-         "Enter kgdb mode", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
-         "Display active task list", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("pid", kdb_pid, "<pidnum>",
-         "Switch to another task", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("reboot", kdb_reboot, "",
-         "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+         KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+       kdb_register_flags("env", kdb_env, "",
+         "Show environment variables", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("set", kdb_set, "",
+         "Set environment variables", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("help", kdb_help, "",
+         "Display Help Message", 1,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("?", kdb_help, "",
+         "Display Help Message", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("cpu", kdb_cpu, "<cpunum>",
+         "Switch to new cpu", 0,
+         KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+       kdb_register_flags("kgdb", kdb_kgdb, "",
+         "Enter kgdb mode", 0, 0);
+       kdb_register_flags("ps", kdb_ps, "[<flags>|A]",
+         "Display active task list", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("pid", kdb_pid, "<pidnum>",
+         "Switch to another task", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("reboot", kdb_reboot, "",
+         "Reboot the machine immediately", 0,
+         KDB_ENABLE_REBOOT);
 #if defined(CONFIG_MODULES)
-       kdb_register_repeat("lsmod", kdb_lsmod, "",
-         "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("lsmod", kdb_lsmod, "",
+         "List loaded kernel modules", 0,
+         KDB_ENABLE_INSPECT);
 #endif
 #if defined(CONFIG_MAGIC_SYSRQ)
-       kdb_register_repeat("sr", kdb_sr, "<key>",
-         "Magic SysRq key", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("sr", kdb_sr, "<key>",
+         "Magic SysRq key", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
 #endif
 #if defined(CONFIG_PRINTK)
-       kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
-         "Display syslog buffer", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("dmesg", kdb_dmesg, "[lines]",
+         "Display syslog buffer", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
 #endif
        if (arch_kgdb_ops.enable_nmi) {
-               kdb_register_repeat("disable_nmi", kdb_disable_nmi, "",
-                 "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE);
-       }
-       kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
-         "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
-         "Send a signal to a process", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("summary", kdb_summary, "",
-         "Summarize the system", 4, KDB_REPEAT_NONE);
-       kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
-         "Display per_cpu variables", 3, KDB_REPEAT_NONE);
-       kdb_register_repeat("grephelp", kdb_grep_help, "",
-         "Display help on | grep", 0, KDB_REPEAT_NONE);
+               kdb_register_flags("disable_nmi", kdb_disable_nmi, "",
+                 "Disable NMI entry to KDB", 0,
+                 KDB_ENABLE_ALWAYS_SAFE);
+       }
+       kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+         "Define a set of commands, down to endefcmd", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("kill", kdb_kill, "<-signal> <pid>",
+         "Send a signal to a process", 0,
+         KDB_ENABLE_SIGNAL);
+       kdb_register_flags("summary", kdb_summary, "",
+         "Summarize the system", 4,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
+         "Display per_cpu variables", 3,
+         KDB_ENABLE_MEM_READ);
+       kdb_register_flags("grephelp", kdb_grep_help, "",
+         "Display help on | grep", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
 }
 
 /* Execute any commands defined in kdb_cmds.  */
index 7afd3c8c41d5d51f17a7a14566551bf2d731d068..eaacd1693954b13aa55c59028c597b6ed91488f3 100644 (file)
@@ -172,10 +172,9 @@ typedef struct _kdbtab {
        kdb_func_t cmd_func;            /* Function to execute command */
        char    *cmd_usage;             /* Usage String for this command */
        char    *cmd_help;              /* Help message for this command */
-       short    cmd_flags;             /* Parsing flags */
        short    cmd_minlen;            /* Minimum legal # command
                                         * chars required */
-       kdb_repeat_t cmd_repeat;        /* Does command auto repeat on enter? */
+       kdb_cmdflags_t cmd_flags;       /* Command behaviour flags */
 } kdbtab_t;
 
 extern int kdb_bt(int, const char **); /* KDB display back trace */
index 4c1ee7f2bebc4bfb1434fe472f0d66f120f8cdc0..19efcf13375a2960e6d8e6994a8aa660d6221253 100644 (file)
@@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle,
 }
 
 static void perf_sample_regs_user(struct perf_regs *regs_user,
-                                 struct pt_regs *regs)
+                                 struct pt_regs *regs,
+                                 struct pt_regs *regs_user_copy)
 {
-       if (!user_mode(regs)) {
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-
-       if (regs) {
-               regs_user->abi  = perf_reg_abi(current);
+       if (user_mode(regs)) {
+               regs_user->abi = perf_reg_abi(current);
                regs_user->regs = regs;
+       } else if (current->mm) {
+               perf_get_regs_user(regs_user, regs, regs_user_copy);
        } else {
                regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
                regs_user->regs = NULL;
@@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
        }
 
        if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
-               perf_sample_regs_user(&data->regs_user, regs);
+               perf_sample_regs_user(&data->regs_user, regs,
+                                     &data->regs_user_copy);
 
        if (sample_type & PERF_SAMPLE_REGS_USER) {
                /* regs dump ABI info */
@@ -6779,7 +6776,6 @@ skip_type:
                __perf_event_init_context(&cpuctx->ctx);
                lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
                lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
-               cpuctx->ctx.type = cpu_context;
                cpuctx->ctx.pmu = pmu;
 
                __perf_cpu_hrtimer_init(cpuctx, cpu);
@@ -7423,7 +7419,19 @@ SYSCALL_DEFINE5(perf_event_open,
                 * task or CPU context:
                 */
                if (move_group) {
-                       if (group_leader->ctx->type != ctx->type)
+                       /*
+                        * Make sure we're both on the same task, or both
+                        * per-cpu events.
+                        */
+                       if (group_leader->ctx->task != ctx->task)
+                               goto err_context;
+
+                       /*
+                        * Make sure we're both events for the same CPU;
+                        * grouping events for different CPUs is broken; since
+                        * you can never concurrently schedule them anyhow.
+                        */
+                       if (group_leader->cpu != event->cpu)
                                goto err_context;
                } else {
                        if (group_leader->ctx != ctx)
index 1ea4369890a31b6776aeb85bada4bf097594cce2..6806c55475eec17be40b1d6c53cf9fe007376279 100644 (file)
@@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 static int wait_consider_task(struct wait_opts *wo, int ptrace,
                                struct task_struct *p)
 {
+       /*
+        * We can race with wait_task_zombie() from another thread.
+        * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
+        * can't confuse the checks below.
+        */
+       int exit_state = ACCESS_ONCE(p->exit_state);
        int ret;
 
-       if (unlikely(p->exit_state == EXIT_DEAD))
+       if (unlikely(exit_state == EXIT_DEAD))
                return 0;
 
        ret = eligible_child(wo, p);
@@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
                return 0;
        }
 
-       if (unlikely(p->exit_state == EXIT_TRACE)) {
+       if (unlikely(exit_state == EXIT_TRACE)) {
                /*
                 * ptrace == 0 means we are the natural parent. In this case
                 * we should clear notask_error, debugger will notify us.
@@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
        }
 
        /* slay zombie? */
-       if (p->exit_state == EXIT_ZOMBIE) {
+       if (exit_state == EXIT_ZOMBIE) {
                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p)) {
                        /*
index 06f58309fed2d082b7a859f3effa5b2562e4d447..ee619929cf9091059406e8f82df32c50d609c0d9 100644 (file)
@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
 
 static void free_insn_page(void *page)
 {
-       module_free(NULL, page);
+       module_memfree(page);
 }
 
 struct kprobe_insn_cache kprobe_insn_slots = {
index 5cf6731b98e9ecf1ffffa754371701613cc64bcb..3ef3736002d895854794a4d940adcd96288640f2 100644 (file)
@@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock)
                        DEBUG_LOCKS_WARN_ON(lock->owner != current);
 
                DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-               mutex_clear_owner(lock);
        }
 
        /*
         * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
         * mutexes so that we can do it here after we've verified state.
         */
+       mutex_clear_owner(lock);
        atomic_set(&lock->count, 1);
 }
 
index 4b082b5cac9eedfd7c31daef7e8dd02974eedbed..db3ccb1dd614800e006c0a8cfd99b9c40aae124a 100644 (file)
@@ -363,6 +363,14 @@ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_raw_spin_lock_nested);
 
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+{
+       __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+       spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+       LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_bh_nested);
+
 unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                   int subclass)
 {
index 3965511ae1333d5bcf0d50ac43bd031be76a823b..d856e96a3cce440f4c9bb0bc5e7fbf2eee4b1afe 100644 (file)
@@ -772,9 +772,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
        return 0;
 }
 
-unsigned long module_refcount(struct module *mod)
+/**
+ * module_refcount - return the refcount or -1 if unloading
+ *
+ * @mod:       the module we're checking
+ *
+ * Returns:
+ *     -1 if the module is in the process of unloading
+ *     otherwise the number of references in the kernel to the module
+ */
+int module_refcount(struct module *mod)
 {
-       return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
+       return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 }
 EXPORT_SYMBOL(module_refcount);
 
@@ -856,7 +865,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
        struct module_use *use;
        int printed_something = 0;
 
-       seq_printf(m, " %lu ", module_refcount(mod));
+       seq_printf(m, " %i ", module_refcount(mod));
 
        /*
         * Always include a trailing , so userspace can differentiate
@@ -908,7 +917,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
 static ssize_t show_refcnt(struct module_attribute *mattr,
                           struct module_kobject *mk, char *buffer)
 {
-       return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
+       return sprintf(buffer, "%i\n", module_refcount(mk->mod));
 }
 
 static struct module_attribute modinfo_refcnt =
@@ -1795,7 +1804,7 @@ static void unset_module_core_ro_nx(struct module *mod) { }
 static void unset_module_init_ro_nx(struct module *mod) { }
 #endif
 
-void __weak module_free(struct module *mod, void *module_region)
+void __weak module_memfree(void *module_region)
 {
        vfree(module_region);
 }
@@ -1804,6 +1813,10 @@ void __weak module_arch_cleanup(struct module *mod)
 {
 }
 
+void __weak module_arch_freeing_init(struct module *mod)
+{
+}
+
 /* Free a module, remove from lists, etc. */
 static void free_module(struct module *mod)
 {
@@ -1841,7 +1854,8 @@ static void free_module(struct module *mod)
 
        /* This may be NULL, but that's OK */
        unset_module_init_ro_nx(mod);
-       module_free(mod, mod->module_init);
+       module_arch_freeing_init(mod);
+       module_memfree(mod->module_init);
        kfree(mod->args);
        percpu_modfree(mod);
 
@@ -1850,7 +1864,7 @@ static void free_module(struct module *mod)
 
        /* Finally, free the core (containing the module structure) */
        unset_module_core_ro_nx(mod);
-       module_free(mod, mod->module_core);
+       module_memfree(mod->module_core);
 
 #ifdef CONFIG_MPU
        update_protections(current->mm);
@@ -2785,7 +2799,7 @@ static int move_module(struct module *mod, struct load_info *info)
                 */
                kmemleak_ignore(ptr);
                if (!ptr) {
-                       module_free(mod, mod->module_core);
+                       module_memfree(mod->module_core);
                        return -ENOMEM;
                }
                memset(ptr, 0, mod->init_size);
@@ -2930,8 +2944,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 static void module_deallocate(struct module *mod, struct load_info *info)
 {
        percpu_modfree(mod);
-       module_free(mod, mod->module_init);
-       module_free(mod, mod->module_core);
+       module_arch_freeing_init(mod);
+       module_memfree(mod->module_init);
+       module_memfree(mod->module_core);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2983,10 +2998,31 @@ static void do_mod_ctors(struct module *mod)
 #endif
 }
 
+/* For freeing module_init on success, in case kallsyms traversing */
+struct mod_initfree {
+       struct rcu_head rcu;
+       void *module_init;
+};
+
+static void do_free_init(struct rcu_head *head)
+{
+       struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+       module_memfree(m->module_init);
+       kfree(m);
+}
+
 /* This is where the real work happens */
 static int do_init_module(struct module *mod)
 {
        int ret = 0;
+       struct mod_initfree *freeinit;
+
+       freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
+       if (!freeinit) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+       freeinit->module_init = mod->module_init;
 
        /*
         * We want to find out whether @mod uses async during init.  Clear
@@ -2999,18 +3035,7 @@ static int do_init_module(struct module *mod)
        if (mod->init != NULL)
                ret = do_one_initcall(mod->init);
        if (ret < 0) {
-               /*
-                * Init routine failed: abort.  Try to protect us from
-                * buggy refcounters.
-                */
-               mod->state = MODULE_STATE_GOING;
-               synchronize_sched();
-               module_put(mod);
-               blocking_notifier_call_chain(&module_notify_list,
-                                            MODULE_STATE_GOING, mod);
-               free_module(mod);
-               wake_up_all(&module_wq);
-               return ret;
+               goto fail_free_freeinit;
        }
        if (ret > 0) {
                pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3080,35 @@ static int do_init_module(struct module *mod)
        mod->strtab = mod->core_strtab;
 #endif
        unset_module_init_ro_nx(mod);
-       module_free(mod, mod->module_init);
+       module_arch_freeing_init(mod);
        mod->module_init = NULL;
        mod->init_size = 0;
        mod->init_ro_size = 0;
        mod->init_text_size = 0;
+       /*
+        * We want to free module_init, but be aware that kallsyms may be
+        * walking this with preempt disabled.  In all the failure paths,
+        * we call synchronize_rcu/synchronize_sched, but we don't want
+        * to slow down the success path, so use actual RCU here.
+        */
+       call_rcu(&freeinit->rcu, do_free_init);
        mutex_unlock(&module_mutex);
        wake_up_all(&module_wq);
 
        return 0;
+
+fail_free_freeinit:
+       kfree(freeinit);
+fail:
+       /* Try to protect us from buggy refcounters. */
+       mod->state = MODULE_STATE_GOING;
+       synchronize_sched();
+       module_put(mod);
+       blocking_notifier_call_chain(&module_notify_list,
+                                    MODULE_STATE_GOING, mod);
+       free_module(mod);
+       wake_up_all(&module_wq);
+       return ret;
 }
 
 static int may_init_module(void)
index 0af9b2c4e56c6cf604699096d0ac4d44c874b36e..728e05b167de984afe2e815f63207952051c3915 100644 (file)
@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
        mk->mp->grp.attrs = new_attrs;
 
        /* Tack new one on the end. */
+       memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
        sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
        mk->mp->attrs[mk->mp->num].param = kp;
        mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
        /* Do not allow runtime DAC changes to make param writable. */
        if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
                mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+       else
+               mk->mp->attrs[mk->mp->num].mattr.store = NULL;
        mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
        mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
        mk->mp->num++;
index 322ea8e93e4ba36c11c0348a34c12e58c983da03..82cfc285b046d8e320559a48cf08007a19742dc0 100644 (file)
@@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2)
 {
        const struct range *r1 = x1;
        const struct range *r2 = x2;
-       s64 start1, start2;
 
-       start1 = r1->start;
-       start2 = r2->start;
-
-       return start1 - start2;
+       if (r1->start < r2->start)
+               return -1;
+       if (r1->start > r2->start)
+               return 1;
+       return 0;
 }
 
 int clean_sort_range(struct range *range, int az)
index b5797b78add65d967ab22f91c9f05182da0de2b4..e628cb11b5606306e11f456471022fa65488976b 100644 (file)
@@ -7112,9 +7112,6 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       alloc_size += num_possible_cpus() * cpumask_size();
 #endif
        if (alloc_size) {
                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
                ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+       }
 #ifdef CONFIG_CPUMASK_OFFSTACK
-               for_each_possible_cpu(i) {
-                       per_cpu(load_balance_mask, i) = (void *)ptr;
-                       ptr += cpumask_size();
-               }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+       for_each_possible_cpu(i) {
+               per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+                       cpumask_size(), GFP_KERNEL, cpu_to_node(i));
        }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
        init_rt_bandwidth(&def_rt_bandwidth,
                        global_rt_period(), global_rt_runtime());
@@ -7295,13 +7292,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
         * since we will exit with TASK_RUNNING make sure we enter with it,
         * otherwise we will destroy state.
         */
-       if (WARN_ONCE(current->state != TASK_RUNNING,
+       WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
                        "do not call blocking ops when !TASK_RUNNING; "
                        "state=%lx set at [<%p>] %pS\n",
                        current->state,
                        (void *)current->task_state_change,
-                       (void *)current->task_state_change))
-               __set_current_state(TASK_RUNNING);
+                       (void *)current->task_state_change);
 
        ___might_sleep(file, line, preempt_offset);
 }
index e5db8c6feebd7e319b20b482885a1bd5e8ad3be1..b52092f2636d50e8a816b2e7e20a648b00d6bb70 100644 (file)
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 static
 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 {
-       int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
-       int rorun = dl_se->runtime <= 0;
-
-       if (!rorun && !dmiss)
-               return 0;
-
-       /*
-        * If we are beyond our current deadline and we are still
-        * executing, then we have already used some of the runtime of
-        * the next instance. Thus, if we do not account that, we are
-        * stealing bandwidth from the system at each deadline miss!
-        */
-       if (dmiss) {
-               dl_se->runtime = rorun ? dl_se->runtime : 0;
-               dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
-       }
-
-       return 1;
+       return (dl_se->runtime <= 0);
 }
 
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
-       if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-               replenish_dl_entity(dl_se, pi_se);
-       else
+       if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
                update_dl_entity(dl_se, pi_se);
+       else if (flags & ENQUEUE_REPLENISH)
+               replenish_dl_entity(dl_se, pi_se);
 
        __enqueue_dl_entity(dl_se);
 }
index df2cdf77f8998d46f58cbf4bc0b3693311c96911..40667cbf371ba9e8732e6c30940cc146752ee0c3 100644 (file)
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+       /* init_cfs_bandwidth() was not called */
+       if (!cfs_b->throttled_cfs_rq.next)
+               return;
+
        hrtimer_cancel(&cfs_b->period_timer);
        hrtimer_cancel(&cfs_b->slack_timer);
 }
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                 * wl = S * s'_i; see (2)
                 */
                if (W > 0 && w < W)
-                       wl = (w * tg->shares) / W;
+                       wl = (w * (long)tg->shares) / W;
                else
                        wl = tg->shares;
 
index a8c9f5a7dda68f9afd60449a3f9fe0b7d048a6f1..ea9c881098941ecd9bbb42fa69a3ddb958d2ec41 100644 (file)
@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                up_write(&me->mm->mmap_sem);
                break;
        case PR_MPX_ENABLE_MANAGEMENT:
+               if (arg2 || arg3 || arg4 || arg5)
+                       return -EINVAL;
                error = MPX_ENABLE_MANAGEMENT(me);
                break;
        case PR_MPX_DISABLE_MANAGEMENT:
+               if (arg2 || arg3 || arg4 || arg5)
+                       return -EINVAL;
                error = MPX_DISABLE_MANAGEMENT(me);
                break;
        default:
index 670fff88a9613e07ac52d70a5365875d93e1e29c..21f82c29c9143c9a39e658593b0ac4671ce9c1b9 100644 (file)
@@ -111,13 +111,8 @@ static int send_reply(struct sk_buff *skb, struct genl_info *info)
 {
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        void *reply = genlmsg_data(genlhdr);
-       int rc;
 
-       rc = genlmsg_end(skb, reply);
-       if (rc < 0) {
-               nlmsg_free(skb);
-               return rc;
-       }
+       genlmsg_end(skb, reply);
 
        return genlmsg_reply(skb, info);
 }
@@ -134,11 +129,7 @@ static void send_cpu_listeners(struct sk_buff *skb,
        void *reply = genlmsg_data(genlhdr);
        int rc, delcount = 0;
 
-       rc = genlmsg_end(skb, reply);
-       if (rc < 0) {
-               nlmsg_free(skb);
-               return;
-       }
+       genlmsg_end(skb, reply);
 
        rc = 0;
        down_read(&listeners->sem);
index 87a346fd6d61ff1c5c1045c9db063ee3247d2361..28bf91c60a0b412d1c97911659c30d735ba40d78 100644 (file)
@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
        if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
                return -EPERM;
 
+       if (txc->modes & ADJ_FREQUENCY) {
+               if (LONG_MIN / PPM_SCALE > txc->freq)
+                       return -EINVAL;
+               if (LONG_MAX / PPM_SCALE < txc->freq)
+                       return -EINVAL;
+       }
+
        return 0;
 }
 
index 6390517e77d48abb83c0d0ffec27864b4695c4c4..2c85b7724af4b0081a112e1b12cbcce4ef831117 100644 (file)
@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;
+
+               if (!timeval_valid(&user_tv))
+                       return -EINVAL;
+
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
index 929a733d302e0d438d2f15f8dfaf6e2e4c56c0d0..224e768bdc738da7c47aca41fcc6d9ecd4c190b4 100644 (file)
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command)
 }
 
 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
-                                  struct ftrace_hash *old_hash)
+                                  struct ftrace_ops_hash *old_hash)
 {
        ops->flags |= FTRACE_OPS_FL_MODIFYING;
-       ops->old_hash.filter_hash = old_hash;
+       ops->old_hash.filter_hash = old_hash->filter_hash;
+       ops->old_hash.notrace_hash = old_hash->notrace_hash;
        ftrace_run_update_code(command);
        ops->old_hash.filter_hash = NULL;
+       ops->old_hash.notrace_hash = NULL;
        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
 }
 
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 
 static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
+static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
 {
        int ret;
        int i;
@@ -3637,6 +3639,7 @@ int
 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
 {
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_func_probe *entry;
        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct ftrace_hash *old_hash = *orig_hash;
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
        mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
+       old_hash_ops.filter_hash = old_hash;
+       /* Probes only have filters */
+       old_hash_ops.notrace_hash = NULL;
+
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
        if (!hash) {
                count = -ENOMEM;
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
 
-       __enable_ftrace_function_probe(old_hash);
+       __enable_ftrace_function_probe(&old_hash_ops);
 
        if (!ret)
                free_ftrace_hash_rcu(old_hash);
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 }
 
 static void ftrace_ops_update_code(struct ftrace_ops *ops,
-                                  struct ftrace_hash *old_hash)
+                                  struct ftrace_ops_hash *old_hash)
 {
-       if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+       struct ftrace_ops *op;
+
+       if (!ftrace_enabled)
+               return;
+
+       if (ops->flags & FTRACE_OPS_FL_ENABLED) {
                ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+               return;
+       }
+
+       /*
+        * If this is the shared global_ops filter, then we need to
+        * check if there is another ops that shares it, is enabled.
+        * If so, we still need to run the modify code.
+        */
+       if (ops->func_hash != &global_ops.local_hash)
+               return;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->func_hash == &global_ops.local_hash &&
+                   op->flags & FTRACE_OPS_FL_ENABLED) {
+                       ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+                       /* Only need to do this once */
+                       return;
+               }
+       } while_for_each_ftrace_op(op);
 }
 
 static int
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
                unsigned long ip, int remove, int reset, int enable)
 {
        struct ftrace_hash **orig_hash;
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *hash;
        int ret;
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 
        mutex_lock(&ftrace_lock);
        old_hash = *orig_hash;
+       old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+       old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
        if (!ret) {
-               ftrace_ops_update_code(ops, old_hash);
+               ftrace_ops_update_code(ops, &old_hash_ops);
                free_ftrace_hash_rcu(old_hash);
        }
        mutex_unlock(&ftrace_lock);
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void)
 int ftrace_regex_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = (struct seq_file *)file->private_data;
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_iterator *iter;
        struct ftrace_hash **orig_hash;
        struct ftrace_hash *old_hash;
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
                mutex_lock(&ftrace_lock);
                old_hash = *orig_hash;
+               old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
+               old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
                ret = ftrace_hash_move(iter->ops, filter_hash,
                                       orig_hash, iter->hash);
                if (!ret) {
-                       ftrace_ops_update_code(iter->ops, old_hash);
+                       ftrace_ops_update_code(iter->ops, &old_hash_ops);
                        free_ftrace_hash_rcu(old_hash);
                }
                mutex_unlock(&ftrace_lock);
index 2e767972e99c2e8791236d10afa717b920543b70..4a9079b9f082fd3bb14e3b46522b1540b001fea1 100644 (file)
@@ -6918,7 +6918,6 @@ void __init trace_init(void)
                        tracepoint_printk = 0;
        }
        tracer_alloc_buffers();
-       init_ftrace_syscalls();
        trace_event_init();     
 }
 
index 366a78a3e61e21a94c06aa96f5b5c02a72c41e78..b03a0ea77b993cf9f175ed7b44fc239832de7def 100644 (file)
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void)
        return 0;
 }
 
+static __init void
+early_enable_events(struct trace_array *tr, bool disable_first)
+{
+       char *buf = bootup_event_buf;
+       char *token;
+       int ret;
+
+       while (true) {
+               token = strsep(&buf, ",");
+
+               if (!token)
+                       break;
+               if (!*token)
+                       continue;
+
+               /* Restarting syscalls requires that we stop them first */
+               if (disable_first)
+                       ftrace_set_clr_event(tr, token, 0);
+
+               ret = ftrace_set_clr_event(tr, token, 1);
+               if (ret)
+                       pr_warn("Failed to enable trace event: %s\n", token);
+
+               /* Put back the comma to allow this to be called again */
+               if (buf)
+                       *(buf - 1) = ',';
+       }
+}
+
 static __init int event_trace_enable(void)
 {
        struct trace_array *tr = top_trace_array();
        struct ftrace_event_call **iter, *call;
-       char *buf = bootup_event_buf;
-       char *token;
        int ret;
 
        if (!tr)
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void)
         */
        __trace_early_add_events(tr);
 
-       while (true) {
-               token = strsep(&buf, ",");
-
-               if (!token)
-                       break;
-               if (!*token)
-                       continue;
-
-               ret = ftrace_set_clr_event(tr, token, 1);
-               if (ret)
-                       pr_warn("Failed to enable trace event: %s\n", token);
-       }
+       early_enable_events(tr, false);
 
        trace_printk_start_comm();
 
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void)
        return 0;
 }
 
+/*
+ * event_trace_enable() is called from trace_event_init() first to
+ * initialize events and perhaps start any events that are on the
+ * command line. Unfortunately, there are some events that will not
+ * start this early, like the system call tracepoints that need
+ * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
+ * is called before pid 1 starts, and this flag is never set, making
+ * the syscall tracepoint never get reached, but the event is enabled
+ * regardless (and not doing anything).
+ */
+static __init int event_trace_enable_again(void)
+{
+       struct trace_array *tr;
+
+       tr = top_trace_array();
+       if (!tr)
+               return -ENODEV;
+
+       early_enable_events(tr, true);
+
+       return 0;
+}
+
+early_initcall(event_trace_enable_again);
+
 static __init int event_trace_init(void)
 {
        struct trace_array *tr;
index b0b1c44e923a358bffcebda8c24b3d5e861b2693..3ccf5c2c1320131e5b1bc0bfe6a26b02edc402a8 100644 (file)
@@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv)
 
 static __init int kdb_ftrace_register(void)
 {
-       kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
-                           "Dump ftrace log", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+                           "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
        return 0;
 }
 
index 6202b08f1933bd5eaca37f0e09ff5bbdfbb9f606..beeeac9e0e3e6829faa3fe16304cb3e7adbe8850 100644 (file)
@@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
- *
- * Return:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
  */
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
 __releases(&pool->lock)
 __acquires(&pool->lock)
 {
-       if (!need_to_create_worker(pool))
-               return false;
 restart:
        spin_unlock_irq(&pool->lock);
 
@@ -1877,7 +1871,6 @@ restart:
         */
        if (need_to_create_worker(pool))
                goto restart;
-       return true;
 }
 
 /**
@@ -1897,16 +1890,14 @@ restart:
  * multiple times.  Does GFP_KERNEL allocations.
  *
  * Return:
- * %false if the pool don't need management and the caller can safely start
- * processing works, %true indicates that the function released pool->lock
- * and reacquired it to perform some management function and that the
- * conditions that the caller verified while holding the lock before
- * calling the function might no longer be true.
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing works, %true if management function was performed and
+ * the conditions that the caller verified before calling the function may
+ * no longer be true.
  */
 static bool manage_workers(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
-       bool ret = false;
 
        /*
         * Anyone who successfully grabs manager_arb wins the arbitration
@@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker)
         * actual management, the pool may stall indefinitely.
         */
        if (!mutex_trylock(&pool->manager_arb))
-               return ret;
+               return false;
 
-       ret |= maybe_create_worker(pool);
+       maybe_create_worker(pool);
 
        mutex_unlock(&pool->manager_arb);
-       return ret;
+       return true;
 }
 
 /**
index 5f2ce616c0462db9b9055528110268385b2b653e..a8f3c9993229b51cacd13a2f3ab34cc2dc3c2ac8 100644 (file)
@@ -1586,7 +1586,7 @@ config TEST_KSTRTOX
        tristate "Test kstrto*() family of functions at runtime"
 
 config TEST_RHASHTABLE
-       bool "Perform selftest on resizable hash table"
+       tristate "Perform selftest on resizable hash table"
        default n
        help
          Enable this option to test the rhashtable functions at boot.
index 358eb81fa28d1951dc443410a847aeac50ea23bb..c635a107a7dece45eafa5dd30c76b41c4733ffa2 100644 (file)
@@ -73,6 +73,31 @@ config KGDB_KDB
        help
          KDB frontend for kernel
 
+config KDB_DEFAULT_ENABLE
+       hex "KDB: Select kdb command functions to be enabled by default"
+       depends on KGDB_KDB
+       default 0x1
+       help
+         Specifiers which kdb commands are enabled by default. This may
+         be set to 1 or 0 to enable all commands or disable almost all
+         commands.
+
+         Alternatively the following bitmask applies:
+
+           0x0002 - allow arbitrary reads from memory and symbol lookup
+           0x0004 - allow arbitrary writes to memory
+           0x0008 - allow current register state to be inspected
+           0x0010 - allow current register state to be modified
+           0x0020 - allow passive inspection (backtrace, process list, lsmod)
+           0x0040 - allow flow control management (breakpoint, single step)
+           0x0080 - enable signalling of processes
+           0x0100 - allow machine to be rebooted
+
+         The config option merely sets the default at boot time. Both
+         issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
+          setting with kdb.cmd_enable=X kernel command line option will
+         override the default settings.
+
 config KDB_KEYBOARD
        bool "KGDB_KDB: keyboard as input device"
        depends on VT && KGDB_KDB
index 3c3b30b9e020d2e4bc02edc73575555641aad057..7db78934ec07c25cae7d62db863d227da41cd546 100644 (file)
@@ -24,7 +24,7 @@ obj-y += lockref.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-        gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
+        gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \
         bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
         percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
@@ -35,6 +35,7 @@ obj-$(CONFIG_TEST_LKM) += test_module.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
index 2404d03e251a64ae7634d30c46aa22a08f9b4538..03dd576e67730fb2870c44512f55c1c0c39b77f3 100644 (file)
@@ -11,6 +11,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 //#define DEBUG
+#include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/assoc_array_priv.h>
index 129775eb6de63a6155cea830df17b6c2476eb024..8b39e86dbab5ea2a1afad17f6a15368f846916d6 100644 (file)
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
 EXPORT_SYMBOL(csum_partial_copy);
 
 #ifndef csum_tcpudp_nofold
+static inline u32 from64to32(u64 x)
+{
+       /* add up 32-bit and 32-bit for 32+c bit */
+       x = (x & 0xffffffff) + (x >> 32);
+       /* add up carry.. */
+       x = (x & 0xffffffff) + (x >> 32);
+       return (u32)x;
+}
+
 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                        unsigned short len,
                        unsigned short proto,
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 #else
        s += (proto + len) << 8;
 #endif
-       s += (s >> 32);
-       return (__force __wsum)s;
+       return (__force __wsum)from64to32(s);
 }
 EXPORT_SYMBOL(csum_tcpudp_nofold);
 #endif
diff --git a/lib/iovec.c b/lib/iovec.c
deleted file mode 100644 (file)
index 2d99cb4..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-#include <linux/uaccess.h>
-#include <linux/export.h>
-#include <linux/uio.h>
-
-/*
- *     Copy iovec to kernel. Returns -EFAULT on error.
- *
- *     Note: this modifies the original iovec.
- */
-
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
-{
-       while (len > 0) {
-               if (iov->iov_len) {
-                       int copy = min_t(unsigned int, len, iov->iov_len);
-                       if (copy_from_user(kdata, iov->iov_base, copy))
-                               return -EFAULT;
-                       len -= copy;
-                       kdata += copy;
-                       iov->iov_base += copy;
-                       iov->iov_len -= copy;
-               }
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovec);
-
-/*
- *     Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-                     int offset, int len)
-{
-       int copy;
-       for (; len > 0; ++iov) {
-               /* Skip over the finished iovecs */
-               if (unlikely(offset >= iov->iov_len)) {
-                       offset -= iov->iov_len;
-                       continue;
-               }
-               copy = min_t(unsigned int, iov->iov_len - offset, len);
-               if (copy_to_user(iov->iov_base + offset, kdata, copy))
-                       return -EFAULT;
-               offset = 0;
-               kdata += copy;
-               len -= copy;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- *     Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                       int offset, int len)
-{
-       /* No data? Done! */
-       if (len == 0)
-               return 0;
-
-       /* Skip over the finished iovecs */
-       while (offset >= iov->iov_len) {
-               offset -= iov->iov_len;
-               iov++;
-       }
-
-       while (len > 0) {
-               u8 __user *base = iov->iov_base + offset;
-               int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-               offset = 0;
-               if (copy_from_user(kdata, base, copy))
-                       return -EFAULT;
-               len -= copy;
-               kdata += copy;
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
index 6c3c723e902bb42c194fbf1c979a2197a549ffc2..9cc4c4a90d00686228bebdfe55b212c34e98206f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
  * Based on the following paper:
 #include <linux/jhash.h>
 #include <linux/random.h>
 #include <linux/rhashtable.h>
+#include <linux/err.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4UL
+#define BUCKET_LOCKS_PER_CPU   128UL
 
-#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+/* Base bits plus 1 bit for nulls marker */
+#define HASH_RESERVED_SPACE    (RHT_BASE_BITS + 1)
 
-#ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+enum {
+       RHT_LOCK_NORMAL,
+       RHT_LOCK_NESTED,
+};
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets which may both contains
+ * entries which link to the same bucket of the old table during resizing.
+ * This allows to simplify the locking as locking the bucket in both
+ * tables during resize always guarantee protection.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
 {
-       return ht->p.mutex_is_held(ht->p.parent);
+       return &tbl->locks[hash & tbl->locks_mask];
 }
-EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
-#endif
 
 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
 {
        return (void *) he - ht->p.head_offset;
 }
 
-static u32 __hashfn(const struct rhashtable *ht, const void *key,
-                     u32 len, u32 hsize)
+static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
+{
+       return hash & (tbl->size - 1);
+}
+
+static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
 {
-       u32 h;
+       u32 hash;
 
-       h = ht->p.hashfn(key, len, ht->p.hash_rnd);
+       if (unlikely(!ht->p.key_len))
+               hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+       else
+               hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
+                                   ht->p.hash_rnd);
 
-       return h & (hsize - 1);
+       return hash >> HASH_RESERVED_SPACE;
 }
 
-/**
- * rhashtable_hashfn - compute hash for key of given length
- * @ht:                hash table to compute for
- * @key:       pointer to key
- * @len:       length of key
- *
- * Computes the hash value using the hash function provided in the 'hashfn'
- * of struct rhashtable_params. The returned value is guaranteed to be
- * smaller than the number of buckets in the hash table.
- */
-u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
+static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
 {
-       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
+}
 
-       return __hashfn(ht, key, len, tbl->size);
+static u32 head_hashfn(const struct rhashtable *ht,
+                      const struct bucket_table *tbl,
+                      const struct rhash_head *he)
+{
+       return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
 }
-EXPORT_SYMBOL_GPL(rhashtable_hashfn);
 
-static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
+#ifdef CONFIG_PROVE_LOCKING
+static void debug_dump_buckets(const struct rhashtable *ht,
+                              const struct bucket_table *tbl)
 {
-       if (unlikely(!ht->p.key_len)) {
-               u32 h;
+       struct rhash_head *he;
+       unsigned int i, hash;
 
-               h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+       for (i = 0; i < tbl->size; i++) {
+               pr_warn(" [Bucket %d] ", i);
+               rht_for_each_rcu(he, tbl, i) {
+                       hash = head_hashfn(ht, tbl, he);
+                       pr_cont("[hash = %#x, lock = %p] ",
+                               hash, bucket_lock(tbl, hash));
+               }
+               pr_cont("\n");
+       }
+
+}
+
+static void debug_dump_table(struct rhashtable *ht,
+                            const struct bucket_table *tbl,
+                            unsigned int hash)
+{
+       struct bucket_table *old_tbl, *future_tbl;
+
+       pr_emerg("BUG: lock for hash %#x in table %p not held\n",
+                hash, tbl);
 
-               return h & (hsize - 1);
+       rcu_read_lock();
+       future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       old_tbl = rht_dereference_rcu(ht->tbl, ht);
+       if (future_tbl != old_tbl) {
+               pr_warn("Future table %p (size: %zd)\n",
+                       future_tbl, future_tbl->size);
+               debug_dump_buckets(ht, future_tbl);
        }
 
-       return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
+       pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
+       debug_dump_buckets(ht, old_tbl);
+
+       rcu_read_unlock();
 }
 
-/**
- * rhashtable_obj_hashfn - compute hash for hashed object
- * @ht:                hash table to compute for
- * @ptr:       pointer to hashed object
- *
- * Computes the hash value using the hash function `hashfn` respectively
- * 'obj_hashfn' depending on whether the hash table is set up to work with
- * a fixed length key. The returned value is guaranteed to be smaller than
- * the number of buckets in the hash table.
- */
-u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
+#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)                              \
+       do {                                                            \
+               if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \
+                       debug_dump_table(HT, TBL, HASH);                \
+                       BUG();                                          \
+               }                                                       \
+       } while (0)
+
+int lockdep_rht_mutex_is_held(struct rhashtable *ht)
 {
-       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 
-       return obj_hashfn(ht, ptr, tbl->size);
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
+{
+       spinlock_t *lock = bucket_lock(tbl, hash);
+
+       return (debug_locks) ? lockdep_is_held(lock) : 1;
 }
-EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
+EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
+#else
+#define ASSERT_RHT_MUTEX(HT)
+#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
+#endif
 
-static u32 head_hashfn(const struct rhashtable *ht,
-                      const struct rhash_head *he, u32 hsize)
+
+static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
 {
-       return obj_hashfn(ht, rht_obj(ht, he), hsize);
+       struct rhash_head __rcu **pprev;
+
+       for (pprev = &tbl->buckets[n];
+            !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
+            pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
+               ;
+
+       return pprev;
 }
 
-static struct bucket_table *bucket_table_alloc(size_t nbuckets)
+static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
+{
+       unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+       unsigned int nr_pcpus = 2;
+#else
+       unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+       nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+       size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
+
+       /* Never allocate more than 0.5 locks per bucket */
+       size = min_t(unsigned int, size, tbl->size >> 1);
+
+       if (sizeof(spinlock_t) != 0) {
+#ifdef CONFIG_NUMA
+               if (size * sizeof(spinlock_t) > PAGE_SIZE)
+                       tbl->locks = vmalloc(size * sizeof(spinlock_t));
+               else
+#endif
+               tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+                                          GFP_KERNEL);
+               if (!tbl->locks)
+                       return -ENOMEM;
+               for (i = 0; i < size; i++)
+                       spin_lock_init(&tbl->locks[i]);
+       }
+       tbl->locks_mask = size - 1;
+
+       return 0;
+}
+
+static void bucket_table_free(const struct bucket_table *tbl)
+{
+       if (tbl)
+               kvfree(tbl->locks);
+
+       kvfree(tbl);
+}
+
+static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
+                                              size_t nbuckets)
 {
        struct bucket_table *tbl;
        size_t size;
+       int i;
 
        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
@@ -122,12 +231,15 @@ static struct bucket_table *bucket_table_alloc(size_t nbuckets)
 
        tbl->size = nbuckets;
 
-       return tbl;
-}
+       if (alloc_bucket_locks(ht, tbl) < 0) {
+               bucket_table_free(tbl);
+               return NULL;
+       }
 
-static void bucket_table_free(const struct bucket_table *tbl)
-{
-       kvfree(tbl);
+       for (i = 0; i < nbuckets; i++)
+               INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+
+       return tbl;
 }
 
 /**
@@ -138,7 +250,8 @@ static void bucket_table_free(const struct bucket_table *tbl)
 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
        /* Expand table when exceeding 75% load */
-       return ht->nelems > (new_size / 4 * 3);
+       return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
+              (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
 }
 EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
@@ -150,41 +263,75 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75);
 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
        /* Shrink table beneath 30% load */
-       return ht->nelems < (new_size * 3 / 10);
+       return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
+              (atomic_read(&ht->shift) > ht->p.min_shift);
 }
 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
-static void hashtable_chain_unzip(const struct rhashtable *ht,
+static void lock_buckets(struct bucket_table *new_tbl,
+                        struct bucket_table *old_tbl, unsigned int hash)
+       __acquires(old_bucket_lock)
+{
+       spin_lock_bh(bucket_lock(old_tbl, hash));
+       if (new_tbl != old_tbl)
+               spin_lock_bh_nested(bucket_lock(new_tbl, hash),
+                                   RHT_LOCK_NESTED);
+}
+
+static void unlock_buckets(struct bucket_table *new_tbl,
+                          struct bucket_table *old_tbl, unsigned int hash)
+       __releases(old_bucket_lock)
+{
+       if (new_tbl != old_tbl)
+               spin_unlock_bh(bucket_lock(new_tbl, hash));
+       spin_unlock_bh(bucket_lock(old_tbl, hash));
+}
+
+/**
+ * Unlink entries on bucket which hash to different bucket.
+ *
+ * Returns true if no more work needs to be performed on the bucket.
+ */
+static bool hashtable_chain_unzip(struct rhashtable *ht,
                                  const struct bucket_table *new_tbl,
-                                 struct bucket_table *old_tbl, size_t n)
+                                 struct bucket_table *old_tbl,
+                                 size_t old_hash)
 {
        struct rhash_head *he, *p, *next;
-       unsigned int h;
+       unsigned int new_hash, new_hash2;
+
+       ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);
 
        /* Old bucket empty, no work needed. */
-       p = rht_dereference(old_tbl->buckets[n], ht);
-       if (!p)
-               return;
+       p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
+                                  old_hash);
+       if (rht_is_a_nulls(p))
+               return false;
+
+       new_hash = head_hashfn(ht, new_tbl, p);
+       ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
 
        /* Advance the old bucket pointer one or more times until it
         * reaches a node that doesn't hash to the same bucket as the
         * previous node p. Call the previous node p;
         */
-       h = head_hashfn(ht, p, new_tbl->size);
-       rht_for_each(he, p->next, ht) {
-               if (head_hashfn(ht, he, new_tbl->size) != h)
+       rht_for_each_continue(he, p->next, old_tbl, old_hash) {
+               new_hash2 = head_hashfn(ht, new_tbl, he);
+               ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);
+
+               if (new_hash != new_hash2)
                        break;
                p = he;
        }
-       RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
+       rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
 
        /* Find the subsequent node which does hash to the same
         * bucket as node P, or NULL if no such node exists.
         */
-       next = NULL;
-       if (he) {
-               rht_for_each(he, he->next, ht) {
-                       if (head_hashfn(ht, he, new_tbl->size) == h) {
+       INIT_RHT_NULLS_HEAD(next, ht, old_hash);
+       if (!rht_is_a_nulls(he)) {
+               rht_for_each_continue(he, he->next, old_tbl, old_hash) {
+                       if (head_hashfn(ht, new_tbl, he) == new_hash) {
                                next = he;
                                break;
                        }
@@ -194,7 +341,20 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
        /* Set p's next pointer to that subsequent node pointer,
         * bypassing the nodes which do not hash to p's bucket
         */
-       RCU_INIT_POINTER(p->next, next);
+       rcu_assign_pointer(p->next, next);
+
+       p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
+                                  old_hash);
+
+       return !rht_is_a_nulls(p);
+}
+
+static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
+                           unsigned int new_hash, struct rhash_head *entry)
+{
+       ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
+
+       rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
 }
 
 /**
@@ -207,53 +367,57 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
  * This function may only be called in a context where it is safe to call
  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
  *
- * The caller must ensure that no concurrent table mutations take place.
- * It is however valid to have concurrent lookups if they are RCU protected.
+ * The caller must ensure that no concurrent resizing occurs by holding
+ * ht->mutex.
+ *
+ * It is valid to have concurrent insertions and deletions protected by per
+ * bucket locks or concurrent RCU protected lookups and traversals.
  */
 int rhashtable_expand(struct rhashtable *ht)
 {
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        struct rhash_head *he;
-       unsigned int i, h;
-       bool complete;
+       unsigned int new_hash, old_hash;
+       bool complete = false;
 
        ASSERT_RHT_MUTEX(ht);
 
-       if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
-               return 0;
-
-       new_tbl = bucket_table_alloc(old_tbl->size * 2);
+       new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
        if (new_tbl == NULL)
                return -ENOMEM;
 
-       ht->shift++;
+       atomic_inc(&ht->shift);
+
+       /* Make insertions go into the new, empty table right away. Deletions
+        * and lookups will be attempted in both tables until we synchronize.
+        * The synchronize_rcu() guarantees for the new table to be picked up
+        * so no new additions go into the old table while we relink.
+        */
+       rcu_assign_pointer(ht->future_tbl, new_tbl);
+       synchronize_rcu();
 
-       /* For each new bucket, search the corresponding old bucket
-        * for the first entry that hashes to the new bucket, and
-        * link the new bucket to that entry. Since all the entries
-        * which will end up in the new bucket appear in the same
-        * old bucket, this constructs an entirely valid new hash
-        * table, but with multiple buckets "zipped" together into a
-        * single imprecise chain.
+       /* For each new bucket, search the corresponding old bucket for the
+        * first entry that hashes to the new bucket, and link the end of
+        * newly formed bucket chain (containing entries added to future
+        * table) to that entry. Since all the entries which will end up in
+        * the new bucket appear in the same old bucket, this constructs an
+        * entirely valid new hash table, but with multiple buckets
+        * "zipped" together into a single imprecise chain.
         */
-       for (i = 0; i < new_tbl->size; i++) {
-               h = i & (old_tbl->size - 1);
-               rht_for_each(he, old_tbl->buckets[h], ht) {
-                       if (head_hashfn(ht, he, new_tbl->size) == i) {
-                               RCU_INIT_POINTER(new_tbl->buckets[i], he);
+       for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
+               old_hash = rht_bucket_index(old_tbl, new_hash);
+               lock_buckets(new_tbl, old_tbl, new_hash);
+               rht_for_each(he, old_tbl, old_hash) {
+                       if (head_hashfn(ht, new_tbl, he) == new_hash) {
+                               link_old_to_new(ht, new_tbl, new_hash, he);
                                break;
                        }
                }
+               unlock_buckets(new_tbl, old_tbl, new_hash);
        }
 
-       /* Publish the new table pointer. Lookups may now traverse
-        * the new table, but they will not benefit from any
-        * additional efficiency until later steps unzip the buckets.
-        */
-       rcu_assign_pointer(ht->tbl, new_tbl);
-
        /* Unzip interleaved hash chains */
-       do {
+       while (!complete && !ht->being_destroyed) {
                /* Wait for readers. All new readers will see the new
                 * table, and thus no references to the old table will
                 * remain.
@@ -265,12 +429,19 @@ int rhashtable_expand(struct rhashtable *ht)
                 * table): ...
                 */
                complete = true;
-               for (i = 0; i < old_tbl->size; i++) {
-                       hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
-                       if (old_tbl->buckets[i] != NULL)
+               for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+                       lock_buckets(new_tbl, old_tbl, old_hash);
+
+                       if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
+                                                 old_hash))
                                complete = false;
+
+                       unlock_buckets(new_tbl, old_tbl, old_hash);
                }
-       } while (!complete);
+       }
+
+       rcu_assign_pointer(ht->tbl, new_tbl);
+       synchronize_rcu();
 
        bucket_table_free(old_tbl);
        return 0;
@@ -284,45 +455,51 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  * This function may only be called in a context where it is safe to call
  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
  *
+ * The caller must ensure that no concurrent resizing occurs by holding
+ * ht->mutex.
+ *
  * The caller must ensure that no concurrent table mutations take place.
  * It is however valid to have concurrent lookups if they are RCU protected.
+ *
+ * It is valid to have concurrent insertions and deletions protected by per
+ * bucket locks or concurrent RCU protected lookups and traversals.
  */
 int rhashtable_shrink(struct rhashtable *ht)
 {
-       struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
-       struct rhash_head __rcu **pprev;
-       unsigned int i;
+       struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
+       unsigned int new_hash;
 
        ASSERT_RHT_MUTEX(ht);
 
-       if (ht->shift <= ht->p.min_shift)
-               return 0;
-
-       ntbl = bucket_table_alloc(tbl->size / 2);
-       if (ntbl == NULL)
+       new_tbl = bucket_table_alloc(ht, tbl->size / 2);
+       if (new_tbl == NULL)
                return -ENOMEM;
 
-       ht->shift--;
+       rcu_assign_pointer(ht->future_tbl, new_tbl);
+       synchronize_rcu();
 
-       /* Link each bucket in the new table to the first bucket
-        * in the old table that contains entries which will hash
-        * to the new bucket.
+       /* Link the first entry in the old bucket to the end of the
+        * bucket in the new table. As entries are concurrently being
+        * added to the new table, lock down the new bucket. As we
+        * always divide the size in half when shrinking, each bucket
+        * in the new table maps to exactly two buckets in the old
+        * table.
         */
-       for (i = 0; i < ntbl->size; i++) {
-               ntbl->buckets[i] = tbl->buckets[i];
+       for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
+               lock_buckets(new_tbl, tbl, new_hash);
 
-               /* Link each bucket in the new table to the first bucket
-                * in the old table that contains entries which will hash
-                * to the new bucket.
-                */
-               for (pprev = &ntbl->buckets[i]; *pprev != NULL;
-                    pprev = &rht_dereference(*pprev, ht)->next)
-                       ;
-               RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
+               rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
+                                  tbl->buckets[new_hash]);
+               ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
+               rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
+                                  tbl->buckets[new_hash + new_tbl->size]);
+
+               unlock_buckets(new_tbl, tbl, new_hash);
        }
 
        /* Publish the new, valid hash table */
-       rcu_assign_pointer(ht->tbl, ntbl);
+       rcu_assign_pointer(ht->tbl, new_tbl);
+       atomic_dec(&ht->shift);
 
        /* Wait for readers. No new readers will have references to the
         * old hash table.
@@ -335,59 +512,99 @@ int rhashtable_shrink(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_shrink);
 
-/**
- * rhashtable_insert - insert object into hash hash table
- * @ht:                hash table
- * @obj:       pointer to hash head inside object
- *
- * Will automatically grow the table via rhashtable_expand() if the the
- * grow_decision function specified at rhashtable_init() returns true.
- *
- * The caller must ensure that no concurrent table mutations occur. It is
- * however valid to have concurrent lookups if they are RCU protected.
- */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
+static void rht_deferred_worker(struct work_struct *work)
 {
-       struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-       u32 hash;
+       struct rhashtable *ht;
+       struct bucket_table *tbl;
+       struct rhashtable_walker *walker;
 
-       ASSERT_RHT_MUTEX(ht);
+       ht = container_of(work, struct rhashtable, run_work);
+       mutex_lock(&ht->mutex);
+       if (ht->being_destroyed)
+               goto unlock;
 
-       hash = head_hashfn(ht, obj, tbl->size);
-       RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
-       rcu_assign_pointer(tbl->buckets[hash], obj);
-       ht->nelems++;
+       tbl = rht_dereference(ht->tbl, ht);
+
+       list_for_each_entry(walker, &ht->walkers, list)
+               walker->resize = true;
 
        if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
                rhashtable_expand(ht);
+       else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+               rhashtable_shrink(ht);
+
+unlock:
+       mutex_unlock(&ht->mutex);
+}
+
+static void rhashtable_wakeup_worker(struct rhashtable *ht)
+{
+       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       size_t size = tbl->size;
+
+       /* Only adjust the table if no resizing is currently in progress. */
+       if (tbl == new_tbl &&
+           ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
+            (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
+               schedule_work(&ht->run_work);
+}
+
+static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+                               struct bucket_table *tbl, u32 hash)
+{
+       struct rhash_head *head;
+
+       hash = rht_bucket_index(tbl, hash);
+       head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+       ASSERT_BUCKET_LOCK(ht, tbl, hash);
+
+       if (rht_is_a_nulls(head))
+               INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
+       else
+               RCU_INIT_POINTER(obj->next, head);
+
+       rcu_assign_pointer(tbl->buckets[hash], obj);
+
+       atomic_inc(&ht->nelems);
+
+       rhashtable_wakeup_worker(ht);
 }
-EXPORT_SYMBOL_GPL(rhashtable_insert);
 
 /**
- * rhashtable_remove_pprev - remove object from hash table given previous element
+ * rhashtable_insert - insert object into hash table
  * @ht:                hash table
  * @obj:       pointer to hash head inside object
- * @pprev:     pointer to previous element
  *
- * Identical to rhashtable_remove() but caller is alreayd aware of the element
- * in front of the element to be deleted. This is in particular useful for
- * deletion when combined with walking or lookup.
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
  */
-void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
-                            struct rhash_head __rcu **pprev)
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
-       struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+       struct bucket_table *tbl, *old_tbl;
+       unsigned hash;
 
-       ASSERT_RHT_MUTEX(ht);
+       rcu_read_lock();
 
-       RCU_INIT_POINTER(*pprev, obj->next);
-       ht->nelems--;
+       tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       old_tbl = rht_dereference_rcu(ht->tbl, ht);
+       hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
-       if (ht->p.shrink_decision &&
-           ht->p.shrink_decision(ht, tbl->size))
-               rhashtable_shrink(ht);
+       lock_buckets(tbl, old_tbl, hash);
+       __rhashtable_insert(ht, obj, tbl, hash);
+       unlock_buckets(tbl, old_tbl, hash);
+
+       rcu_read_unlock();
 }
-EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
+EXPORT_SYMBOL_GPL(rhashtable_insert);
 
 /**
  * rhashtable_remove - remove object from hash table
@@ -398,7 +615,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
  * walk the bucket chain upon removal. The removal operation is thus
  * considerable slow if the hash table is not correctly sized.
  *
- * Will automatically shrink the table via rhashtable_expand() if the the
+ * Will automatically shrink the table via rhashtable_expand() if the
  * shrink_decision function specified at rhashtable_init() returns true.
  *
  * The caller must ensure that no concurrent table mutations occur. It is
@@ -406,30 +623,87 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
  */
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
-       struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+       struct bucket_table *tbl, *new_tbl, *old_tbl;
        struct rhash_head __rcu **pprev;
-       struct rhash_head *he;
-       u32 h;
+       struct rhash_head *he, *he2;
+       unsigned int hash, new_hash;
+       bool ret = false;
 
-       ASSERT_RHT_MUTEX(ht);
-
-       h = head_hashfn(ht, obj, tbl->size);
-
-       pprev = &tbl->buckets[h];
-       rht_for_each(he, tbl->buckets[h], ht) {
+       rcu_read_lock();
+       old_tbl = rht_dereference_rcu(ht->tbl, ht);
+       tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
+
+       lock_buckets(new_tbl, old_tbl, new_hash);
+restart:
+       hash = rht_bucket_index(tbl, new_hash);
+       pprev = &tbl->buckets[hash];
+       rht_for_each(he, tbl, hash) {
                if (he != obj) {
                        pprev = &he->next;
                        continue;
                }
 
-               rhashtable_remove_pprev(ht, he, pprev);
-               return true;
+               ASSERT_BUCKET_LOCK(ht, tbl, hash);
+
+               if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
+                   !rht_is_a_nulls(obj->next) &&
+                   head_hashfn(ht, tbl, obj->next) != hash) {
+                       rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
+               } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
+                       rht_for_each_continue(he2, obj->next, tbl, hash) {
+                               if (head_hashfn(ht, tbl, he2) == hash) {
+                                       rcu_assign_pointer(*pprev, he2);
+                                       goto found;
+                               }
+                       }
+
+                       rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
+               } else {
+                       rcu_assign_pointer(*pprev, obj->next);
+               }
+
+found:
+               ret = true;
+               break;
+       }
+
+       /* The entry may be linked in either 'tbl', 'future_tbl', or both.
+        * 'future_tbl' only exists for a short period of time during
+        * resizing. Thus traversing both is fine and the added cost is
+        * very rare.
+        */
+       if (tbl != old_tbl) {
+               tbl = old_tbl;
+               goto restart;
+       }
+
+       unlock_buckets(new_tbl, old_tbl, new_hash);
+
+       if (ret) {
+               atomic_dec(&ht->nelems);
+               rhashtable_wakeup_worker(ht);
        }
 
-       return false;
+       rcu_read_unlock();
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(rhashtable_remove);
 
+struct rhashtable_compare_arg {
+       struct rhashtable *ht;
+       const void *key;
+};
+
+static bool rhashtable_compare(void *ptr, void *arg)
+{
+       struct rhashtable_compare_arg *x = arg;
+       struct rhashtable *ht = x->ht;
+
+       return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
+}
+
 /**
  * rhashtable_lookup - lookup key in hash table
  * @ht:                hash table
@@ -439,65 +713,313 @@ EXPORT_SYMBOL_GPL(rhashtable_remove);
  * for a entry with an identical key. The first matching entry is returned.
  *
  * This lookup function may only be used for fixed key hash table (key_len
- * paramter set). It will BUG() if used inappropriately.
+ * parameter set). It will BUG() if used inappropriately.
  *
- * Lookups may occur in parallel with hash mutations as long as the lookup is
- * guarded by rcu_read_lock(). The caller must take care of this.
+ * Lookups may occur in parallel with hashtable mutations and resizing.
  */
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
+void *rhashtable_lookup(struct rhashtable *ht, const void *key)
 {
-       const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-       struct rhash_head *he;
-       u32 h;
+       struct rhashtable_compare_arg arg = {
+               .ht = ht,
+               .key = key,
+       };
 
        BUG_ON(!ht->p.key_len);
 
-       h = __hashfn(ht, key, ht->p.key_len, tbl->size);
-       rht_for_each_rcu(he, tbl->buckets[h], ht) {
-               if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
-                          ht->p.key_len))
-                       continue;
-               return (void *) he - ht->p.head_offset;
-       }
-
-       return NULL;
+       return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup);
 
 /**
  * rhashtable_lookup_compare - search hash table with compare function
  * @ht:                hash table
- * @hash:      hash value of desired entry
+ * @key:       the pointer to the key
  * @compare:   compare function, must return true on match
  * @arg:       argument passed on to compare function
  *
  * Traverses the bucket chain behind the provided hash value and calls the
  * specified compare function for each entry.
  *
- * Lookups may occur in parallel with hash mutations as long as the lookup is
- * guarded by rcu_read_lock(). The caller must take care of this.
+ * Lookups may occur in parallel with hashtable mutations and resizing.
  *
  * Returns the first entry on which the compare function returned true.
  */
-void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
                                bool (*compare)(void *, void *), void *arg)
 {
-       const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       const struct bucket_table *tbl, *old_tbl;
        struct rhash_head *he;
+       u32 hash;
 
-       if (unlikely(hash >= tbl->size))
-               return NULL;
+       rcu_read_lock();
 
-       rht_for_each_rcu(he, tbl->buckets[hash], ht) {
+       old_tbl = rht_dereference_rcu(ht->tbl, ht);
+       tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       hash = key_hashfn(ht, key, ht->p.key_len);
+restart:
+       rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
                if (!compare(rht_obj(ht, he), arg))
                        continue;
-               return (void *) he - ht->p.head_offset;
+               rcu_read_unlock();
+               return rht_obj(ht, he);
+       }
+
+       if (unlikely(tbl != old_tbl)) {
+               tbl = old_tbl;
+               goto restart;
        }
+       rcu_read_unlock();
 
        return NULL;
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
 
+/**
+ * rhashtable_lookup_insert - lookup and insert object into hash table
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
+{
+       struct rhashtable_compare_arg arg = {
+               .ht = ht,
+               .key = rht_obj(ht, obj) + ht->p.key_offset,
+       };
+
+       BUG_ON(!ht->p.key_len);
+
+       return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
+                                               &arg);
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
+
+/**
+ * rhashtable_lookup_compare_insert - search and insert object to hash table
+ *                                    with compare function
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @compare:   compare function, must return true on match
+ * @arg:       argument passed on to compare function
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
+                                     struct rhash_head *obj,
+                                     bool (*compare)(void *, void *),
+                                     void *arg)
+{
+       struct bucket_table *new_tbl, *old_tbl;
+       u32 new_hash;
+       bool success = true;
+
+       BUG_ON(!ht->p.key_len);
+
+       rcu_read_lock();
+       old_tbl = rht_dereference_rcu(ht->tbl, ht);
+       new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+       new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
+
+       lock_buckets(new_tbl, old_tbl, new_hash);
+
+       if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
+                                     compare, arg)) {
+               success = false;
+               goto exit;
+       }
+
+       __rhashtable_insert(ht, obj, new_tbl, new_hash);
+
+exit:
+       unlock_buckets(new_tbl, old_tbl, new_hash);
+       rcu_read_unlock();
+
+       return success;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
+
+/**
+ * rhashtable_walk_init - Initialise an iterator
+ * @ht:                Table to walk over
+ * @iter:      Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice.  Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit if this function returns
+ * successfully.
+ */
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
+{
+       iter->ht = ht;
+       iter->p = NULL;
+       iter->slot = 0;
+       iter->skip = 0;
+
+       iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
+       if (!iter->walker)
+               return -ENOMEM;
+
+       mutex_lock(&ht->mutex);
+       list_add(&iter->walker->list, &ht->walkers);
+       mutex_unlock(&ht->mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+
+/**
+ * rhashtable_walk_exit - Free an iterator
+ * @iter:      Hash table Iterator
+ *
+ * This function frees resources allocated by rhashtable_walk_init.
+ */
+void rhashtable_walk_exit(struct rhashtable_iter *iter)
+{
+       mutex_lock(&iter->ht->mutex);
+       list_del(&iter->walker->list);
+       mutex_unlock(&iter->ht->mutex);
+       kfree(iter->walker);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
+
+/**
+ * rhashtable_walk_start - Start a hash table walk
+ * @iter:      Hash table iterator
+ *
+ * Start a hash table walk.  Note that we take the RCU lock in all
+ * cases including when we return an error.  So you must always call
+ * rhashtable_walk_stop to clean up.
+ *
+ * Returns zero if successful.
+ *
+ * Returns -EAGAIN if resize event occured.  Note that the iterator
+ * will rewind back to the beginning and you may use it immediately
+ * by calling rhashtable_walk_next.
+ */
+int rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+       rcu_read_lock();
+
+       if (iter->walker->resize) {
+               iter->slot = 0;
+               iter->skip = 0;
+               iter->walker->resize = false;
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter:      Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occured.  Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+       const struct bucket_table *tbl;
+       struct rhashtable *ht = iter->ht;
+       struct rhash_head *p = iter->p;
+       void *obj = NULL;
+
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+
+       if (p) {
+               p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
+               goto next;
+       }
+
+       for (; iter->slot < tbl->size; iter->slot++) {
+               int skip = iter->skip;
+
+               rht_for_each_rcu(p, tbl, iter->slot) {
+                       if (!skip)
+                               break;
+                       skip--;
+               }
+
+next:
+               if (!rht_is_a_nulls(p)) {
+                       iter->skip++;
+                       iter->p = p;
+                       obj = rht_obj(ht, p);
+                       goto out;
+               }
+
+               iter->skip = 0;
+       }
+
+       iter->p = NULL;
+
+out:
+       if (iter->walker->resize) {
+               iter->p = NULL;
+               iter->slot = 0;
+               iter->skip = 0;
+               iter->walker->resize = false;
+               return ERR_PTR(-EAGAIN);
+       }
+
+       return obj;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_next);
+
+/**
+ * rhashtable_walk_stop - Finish a hash table walk
+ * @iter:      Hash table iterator
+ *
+ * Finish a hash table walk.
+ */
+void rhashtable_walk_stop(struct rhashtable_iter *iter)
+{
+       rcu_read_unlock();
+       iter->p = NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
+
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
@@ -525,9 +1047,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
  *     .key_offset = offsetof(struct test_obj, key),
  *     .key_len = sizeof(int),
  *     .hashfn = jhash,
- * #ifdef CONFIG_PROVE_LOCKING
- *     .mutex_is_held = &my_mutex_is_held,
- * #endif
+ *     .nulls_base = (1U << RHT_BASE_SHIFT),
  * };
  *
  * Configuration Example 2: Variable length keys
@@ -547,9 +1067,6 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
  *     .head_offset = offsetof(struct test_obj, node),
  *     .hashfn = jhash,
  *     .obj_hashfn = my_hash_fn,
- * #ifdef CONFIG_PROVE_LOCKING
- *     .mutex_is_held = &my_mutex_is_held,
- * #endif
  * };
  */
 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
@@ -563,24 +1080,40 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
            (!params->key_len && !params->obj_hashfn))
                return -EINVAL;
 
+       if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
+               return -EINVAL;
+
        params->min_shift = max_t(size_t, params->min_shift,
                                  ilog2(HASH_MIN_SIZE));
 
        if (params->nelem_hint)
                size = rounded_hashtable_size(params);
 
-       tbl = bucket_table_alloc(size);
+       memset(ht, 0, sizeof(*ht));
+       mutex_init(&ht->mutex);
+       memcpy(&ht->p, params, sizeof(*params));
+       INIT_LIST_HEAD(&ht->walkers);
+
+       if (params->locks_mul)
+               ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
+       else
+               ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
+
+       tbl = bucket_table_alloc(ht, size);
        if (tbl == NULL)
                return -ENOMEM;
 
-       memset(ht, 0, sizeof(*ht));
-       ht->shift = ilog2(tbl->size);
-       memcpy(&ht->p, params, sizeof(*params));
+       atomic_set(&ht->nelems, 0);
+       atomic_set(&ht->shift, ilog2(tbl->size));
        RCU_INIT_POINTER(ht->tbl, tbl);
+       RCU_INIT_POINTER(ht->future_tbl, tbl);
 
        if (!ht->p.hash_rnd)
                get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
+       if (ht->p.grow_decision || ht->p.shrink_decision)
+               INIT_WORK(&ht->run_work, rht_deferred_worker);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_init);
@@ -593,216 +1126,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
  * has to make sure that no resizing may happen by unpublishing the hashtable
  * and waiting for the quiescent cycle before releasing the bucket array.
  */
-void rhashtable_destroy(const struct rhashtable *ht)
+void rhashtable_destroy(struct rhashtable *ht)
 {
-       bucket_table_free(ht->tbl);
-}
-EXPORT_SYMBOL_GPL(rhashtable_destroy);
-
-/**************************************************************************
- * Self Test
- **************************************************************************/
-
-#ifdef CONFIG_TEST_RHASHTABLE
+       ht->being_destroyed = true;
 
-#define TEST_HT_SIZE   8
-#define TEST_ENTRIES   2048
-#define TEST_PTR       ((void *) 0xdeadbeef)
-#define TEST_NEXPANDS  4
+       if (ht->p.grow_decision || ht->p.shrink_decision)
+               cancel_work_sync(&ht->run_work);
 
-#ifdef CONFIG_PROVE_LOCKING
-static int test_mutex_is_held(void *parent)
-{
-       return 1;
+       mutex_lock(&ht->mutex);
+       bucket_table_free(rht_dereference(ht->tbl, ht));
+       mutex_unlock(&ht->mutex);
 }
-#endif
-
-struct test_obj {
-       void                    *ptr;
-       int                     value;
-       struct rhash_head       node;
-};
-
-static int __init test_rht_lookup(struct rhashtable *ht)
-{
-       unsigned int i;
-
-       for (i = 0; i < TEST_ENTRIES * 2; i++) {
-               struct test_obj *obj;
-               bool expected = !(i % 2);
-               u32 key = i;
-
-               obj = rhashtable_lookup(ht, &key);
-
-               if (expected && !obj) {
-                       pr_warn("Test failed: Could not find key %u\n", key);
-                       return -ENOENT;
-               } else if (!expected && obj) {
-                       pr_warn("Test failed: Unexpected entry found for key %u\n",
-                               key);
-                       return -EEXIST;
-               } else if (expected && obj) {
-                       if (obj->ptr != TEST_PTR || obj->value != i) {
-                               pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
-                                       obj->ptr, TEST_PTR, obj->value, i);
-                               return -EINVAL;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static void test_bucket_stats(struct rhashtable *ht, bool quiet)
-{
-       unsigned int cnt, rcu_cnt, i, total = 0;
-       struct test_obj *obj;
-       struct bucket_table *tbl;
-
-       tbl = rht_dereference_rcu(ht->tbl, ht);
-       for (i = 0; i < tbl->size; i++) {
-               rcu_cnt = cnt = 0;
-
-               if (!quiet)
-                       pr_info(" [%#4x/%zu]", i, tbl->size);
-
-               rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
-                       cnt++;
-                       total++;
-                       if (!quiet)
-                               pr_cont(" [%p],", obj);
-               }
-
-               rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
-                       rcu_cnt++;
-
-               if (rcu_cnt != cnt)
-                       pr_warn("Test failed: Chain count mismach %d != %d",
-                               cnt, rcu_cnt);
-
-               if (!quiet)
-                       pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
-                               i, tbl->buckets[i], cnt);
-       }
-
-       pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
-               total, ht->nelems, TEST_ENTRIES);
-
-       if (total != ht->nelems || total != TEST_ENTRIES)
-               pr_warn("Test failed: Total count mismatch ^^^");
-}
-
-static int __init test_rhashtable(struct rhashtable *ht)
-{
-       struct bucket_table *tbl;
-       struct test_obj *obj, *next;
-       int err;
-       unsigned int i;
-
-       /*
-        * Insertion Test:
-        * Insert TEST_ENTRIES into table with all keys even numbers
-        */
-       pr_info("  Adding %d keys\n", TEST_ENTRIES);
-       for (i = 0; i < TEST_ENTRIES; i++) {
-               struct test_obj *obj;
-
-               obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-               if (!obj) {
-                       err = -ENOMEM;
-                       goto error;
-               }
-
-               obj->ptr = TEST_PTR;
-               obj->value = i * 2;
-
-               rhashtable_insert(ht, &obj->node);
-       }
-
-       rcu_read_lock();
-       test_bucket_stats(ht, true);
-       test_rht_lookup(ht);
-       rcu_read_unlock();
-
-       for (i = 0; i < TEST_NEXPANDS; i++) {
-               pr_info("  Table expansion iteration %u...\n", i);
-               rhashtable_expand(ht);
-
-               rcu_read_lock();
-               pr_info("  Verifying lookups...\n");
-               test_rht_lookup(ht);
-               rcu_read_unlock();
-       }
-
-       for (i = 0; i < TEST_NEXPANDS; i++) {
-               pr_info("  Table shrinkage iteration %u...\n", i);
-               rhashtable_shrink(ht);
-
-               rcu_read_lock();
-               pr_info("  Verifying lookups...\n");
-               test_rht_lookup(ht);
-               rcu_read_unlock();
-       }
-
-       rcu_read_lock();
-       test_bucket_stats(ht, true);
-       rcu_read_unlock();
-
-       pr_info("  Deleting %d keys\n", TEST_ENTRIES);
-       for (i = 0; i < TEST_ENTRIES; i++) {
-               u32 key = i * 2;
-
-               obj = rhashtable_lookup(ht, &key);
-               BUG_ON(!obj);
-
-               rhashtable_remove(ht, &obj->node);
-               kfree(obj);
-       }
-
-       return 0;
-
-error:
-       tbl = rht_dereference_rcu(ht->tbl, ht);
-       for (i = 0; i < tbl->size; i++)
-               rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
-                       kfree(obj);
-
-       return err;
-}
-
-static int __init test_rht_init(void)
-{
-       struct rhashtable ht;
-       struct rhashtable_params params = {
-               .nelem_hint = TEST_HT_SIZE,
-               .head_offset = offsetof(struct test_obj, node),
-               .key_offset = offsetof(struct test_obj, value),
-               .key_len = sizeof(int),
-               .hashfn = jhash,
-#ifdef CONFIG_PROVE_LOCKING
-               .mutex_is_held = &test_mutex_is_held,
-#endif
-               .grow_decision = rht_grow_above_75,
-               .shrink_decision = rht_shrink_below_30,
-       };
-       int err;
-
-       pr_info("Running resizable hashtable tests...\n");
-
-       err = rhashtable_init(&ht, &params);
-       if (err < 0) {
-               pr_warn("Test failed: Unable to initialize hashtable: %d\n",
-                       err);
-               return err;
-       }
-
-       err = test_rhashtable(&ht);
-
-       rhashtable_destroy(&ht);
-
-       return err;
-}
-
-subsys_initcall(test_rht_init);
-
-#endif /* CONFIG_TEST_RHASHTABLE */
+EXPORT_SYMBOL_GPL(rhashtable_destroy);
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
new file mode 100644 (file)
index 0000000..1dfeba7
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * Based on the following paper:
+ * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
+ *
+ * Code partially derived from nft_hash
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**************************************************************************
+ * Self Test
+ **************************************************************************/
+
+#include <linux/init.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rcupdate.h>
+#include <linux/rhashtable.h>
+#include <linux/slab.h>
+
+
+#define TEST_HT_SIZE   8
+#define TEST_ENTRIES   2048
+#define TEST_PTR       ((void *) 0xdeadbeef)
+#define TEST_NEXPANDS  4
+
+struct test_obj {
+       void                    *ptr;
+       int                     value;
+       struct rhash_head       node;
+};
+
+static int __init test_rht_lookup(struct rhashtable *ht)
+{
+       unsigned int i;
+
+       for (i = 0; i < TEST_ENTRIES * 2; i++) {
+               struct test_obj *obj;
+               bool expected = !(i % 2);
+               u32 key = i;
+
+               obj = rhashtable_lookup(ht, &key);
+
+               if (expected && !obj) {
+                       pr_warn("Test failed: Could not find key %u\n", key);
+                       return -ENOENT;
+               } else if (!expected && obj) {
+                       pr_warn("Test failed: Unexpected entry found for key %u\n",
+                               key);
+                       return -EEXIST;
+               } else if (expected && obj) {
+                       if (obj->ptr != TEST_PTR || obj->value != i) {
+                               pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
+                                       obj->ptr, TEST_PTR, obj->value, i);
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static void test_bucket_stats(struct rhashtable *ht, bool quiet)
+{
+       unsigned int cnt, rcu_cnt, i, total = 0;
+       struct rhash_head *pos;
+       struct test_obj *obj;
+       struct bucket_table *tbl;
+
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+       for (i = 0; i < tbl->size; i++) {
+               rcu_cnt = cnt = 0;
+
+               if (!quiet)
+                       pr_info(" [%#4x/%zu]", i, tbl->size);
+
+               rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
+                       cnt++;
+                       total++;
+                       if (!quiet)
+                               pr_cont(" [%p],", obj);
+               }
+
+               rht_for_each_entry_rcu(obj, pos, tbl, i, node)
+                       rcu_cnt++;
+
+               if (rcu_cnt != cnt)
+                       pr_warn("Test failed: Chain count mismach %d != %d",
+                               cnt, rcu_cnt);
+
+               if (!quiet)
+                       pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
+                               i, tbl->buckets[i], cnt);
+       }
+
+       pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
+               total, atomic_read(&ht->nelems), TEST_ENTRIES);
+
+       if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
+               pr_warn("Test failed: Total count mismatch ^^^");
+}
+
+static int __init test_rhashtable(struct rhashtable *ht)
+{
+       struct bucket_table *tbl;
+       struct test_obj *obj;
+       struct rhash_head *pos, *next;
+       int err;
+       unsigned int i;
+
+       /*
+        * Insertion Test:
+        * Insert TEST_ENTRIES into table with all keys even numbers
+        */
+       pr_info("  Adding %d keys\n", TEST_ENTRIES);
+       for (i = 0; i < TEST_ENTRIES; i++) {
+               struct test_obj *obj;
+
+               obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+               if (!obj) {
+                       err = -ENOMEM;
+                       goto error;
+               }
+
+               obj->ptr = TEST_PTR;
+               obj->value = i * 2;
+
+               rhashtable_insert(ht, &obj->node);
+       }
+
+       rcu_read_lock();
+       test_bucket_stats(ht, true);
+       test_rht_lookup(ht);
+       rcu_read_unlock();
+
+       for (i = 0; i < TEST_NEXPANDS; i++) {
+               pr_info("  Table expansion iteration %u...\n", i);
+               mutex_lock(&ht->mutex);
+               rhashtable_expand(ht);
+               mutex_unlock(&ht->mutex);
+
+               rcu_read_lock();
+               pr_info("  Verifying lookups...\n");
+               test_rht_lookup(ht);
+               rcu_read_unlock();
+       }
+
+       for (i = 0; i < TEST_NEXPANDS; i++) {
+               pr_info("  Table shrinkage iteration %u...\n", i);
+               mutex_lock(&ht->mutex);
+               rhashtable_shrink(ht);
+               mutex_unlock(&ht->mutex);
+
+               rcu_read_lock();
+               pr_info("  Verifying lookups...\n");
+               test_rht_lookup(ht);
+               rcu_read_unlock();
+       }
+
+       rcu_read_lock();
+       test_bucket_stats(ht, true);
+       rcu_read_unlock();
+
+       pr_info("  Deleting %d keys\n", TEST_ENTRIES);
+       for (i = 0; i < TEST_ENTRIES; i++) {
+               u32 key = i * 2;
+
+               obj = rhashtable_lookup(ht, &key);
+               BUG_ON(!obj);
+
+               rhashtable_remove(ht, &obj->node);
+               kfree(obj);
+       }
+
+       return 0;
+
+error:
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+       for (i = 0; i < tbl->size; i++)
+               rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
+                       kfree(obj);
+
+       return err;
+}
+
+static int __init test_rht_init(void)
+{
+       struct rhashtable ht;
+       struct rhashtable_params params = {
+               .nelem_hint = TEST_HT_SIZE,
+               .head_offset = offsetof(struct test_obj, node),
+               .key_offset = offsetof(struct test_obj, value),
+               .key_len = sizeof(int),
+               .hashfn = jhash,
+               .nulls_base = (3U << RHT_BASE_SHIFT),
+               .grow_decision = rht_grow_above_75,
+               .shrink_decision = rht_shrink_below_30,
+       };
+       int err;
+
+       pr_info("Running resizable hashtable tests...\n");
+
+       err = rhashtable_init(&ht, &params);
+       if (err < 0) {
+               pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+                       err);
+               return err;
+       }
+
+       err = test_rhashtable(&ht);
+
+       rhashtable_destroy(&ht);
+
+       return err;
+}
+
+module_init(test_rht_init);
+
+MODULE_LICENSE("GPL v2");
index 56badfc4810a8a4e70597ce82a0532929e6e6dda..957d3da53dddcd53b72da82e39f77d951db57a66 100644 (file)
@@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC
        depends on !KMEMCHECK
        select PAGE_EXTENSION
        select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
-       select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
        ---help---
          Unmap pages from the kernel linear mapping after free_pages().
          This results in a large slowdown, but helps to find certain types
@@ -27,13 +26,5 @@ config DEBUG_PAGEALLOC
          that would result in incorrect warnings of memory corruption after
          a resume because free pages are not saved to the suspend image.
 
-config WANT_PAGE_DEBUG_FLAGS
-       bool
-
 config PAGE_POISONING
        bool
-       select WANT_PAGE_DEBUG_FLAGS
-
-config PAGE_GUARD
-       bool
-       select WANT_PAGE_DEBUG_FLAGS
index a900759cc8075fc8b0da9a37ebf6f93de34d8d10..8dd50ce6326fd50540b24fe6e93d12546b20594a 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
                        return -ENOMEM;
                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                        return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-               if (ret & VM_FAULT_SIGBUS)
+               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                        return -EFAULT;
                BUG();
        }
@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                        return -ENOMEM;
                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                        return -EHWPOISON;
-               if (ret & VM_FAULT_SIGBUS)
+               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                        return -EFAULT;
                BUG();
        }
index d247efab5073abfaeb9c11e33fb87a2fbb8ebdb0..15647fb0394fabc54b10206bf35590aa69b5830c 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
                else
                        ret = VM_FAULT_WRITE;
                put_page(page);
-       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
        /*
         * We must loop because handle_mm_fault() may back out if there's
         * any difficulty e.g. if pte accessed bit gets updated concurrently.
index ef91e856c7e456a0674e7b76e15cbd771a6c9acb..683b4782019b2c32626645155bb8175f3bf3a4a5 100644 (file)
@@ -1477,9 +1477,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 
        pr_info("Task in ");
        pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
-       pr_info(" killed as a result of limit of ");
+       pr_cont(" killed as a result of limit of ");
        pr_cont_cgroup_path(memcg->css.cgroup);
-       pr_info("\n");
+       pr_cont("\n");
 
        rcu_read_unlock();
 
@@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
        if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
                mem_cgroup_swap_statistics(from, false);
                mem_cgroup_swap_statistics(to, true);
-               /*
-                * This function is only called from task migration context now.
-                * It postpones page_counter and refcount handling till the end
-                * of task migration(mem_cgroup_clear_mc()) for performance
-                * improvement. But we cannot postpone css_get(to)  because if
-                * the process that has been moved to @to does swap-in, the
-                * refcount of @to might be decreased to 0.
-                *
-                * We are in attach() phase, so the cgroup is guaranteed to be
-                * alive, so we can just call css_get().
-                */
-               css_get(&to->css);
                return 0;
        }
        return -EINVAL;
@@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        if (parent_css == NULL) {
                root_mem_cgroup = memcg;
                page_counter_init(&memcg->memory, NULL);
+               memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
        }
@@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 
        if (parent->use_hierarchy) {
                page_counter_init(&memcg->memory, &parent->memory);
+               memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, &parent->memsw);
                page_counter_init(&memcg->kmem, &parent->kmem);
 
@@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
                 */
        } else {
                page_counter_init(&memcg->memory, NULL);
+               memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
                /*
@@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
        mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
        mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
        memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
-       memcg->soft_limit = 0;
+       memcg->soft_limit = PAGE_COUNTER_MAX;
 }
 
 #ifdef CONFIG_MMU
index ca920d1fd314a17c7250d7916bd37403afa96b79..2c3536cc6c6327c9c3e58eddf75c264ddaa11911 100644 (file)
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
+       if (!tlb->end)
+               return;
+
        tlb_flush(tlb);
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
        struct mmu_gather_batch *batch;
 
-       for (batch = &tlb->local; batch; batch = batch->next) {
+       for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-       if (!tlb->end)
-               return;
-
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
 }
@@ -2137,17 +2137,24 @@ reuse:
                if (!dirty_page)
                        return ret;
 
-               /*
-                * Yes, Virginia, this is actually required to prevent a race
-                * with clear_page_dirty_for_io() from clearing the page dirty
-                * bit after it clear all dirty ptes, but before a racing
-                * do_wp_page installs a dirty pte.
-                *
-                * do_shared_fault is protected similarly.
-                */
                if (!page_mkwrite) {
-                       wait_on_page_locked(dirty_page);
-                       set_page_dirty_balance(dirty_page);
+                       struct address_space *mapping;
+                       int dirtied;
+
+                       lock_page(dirty_page);
+                       dirtied = set_page_dirty(dirty_page);
+                       VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
+                       mapping = dirty_page->mapping;
+                       unlock_page(dirty_page);
+
+                       if (dirtied && mapping) {
+                               /*
+                                * Some device drivers do not set page.mapping
+                                * but still dirty their pages
+                                */
+                               balance_dirty_pages_ratelimited(mapping);
+                       }
+
                        /* file_update_time outside page_lock */
                        if (vma->vm_file)
                                file_update_time(vma->vm_file);
@@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (prev && prev->vm_end == address)
                        return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-               expand_downwards(vma, address - PAGE_SIZE);
+               return expand_downwards(vma, address - PAGE_SIZE);
        }
        if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
                struct vm_area_struct *next = vma->vm_next;
@@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (next && next->vm_start == address + PAGE_SIZE)
                        return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-               expand_upwards(vma, address + PAGE_SIZE);
+               return expand_upwards(vma, address + PAGE_SIZE);
        }
        return 0;
 }
@@ -2625,7 +2632,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        /* Check if we need to add a guard page to the stack */
        if (check_stack_guard_page(vma, address) < 0)
-               return VM_FAULT_SIGBUS;
+               return VM_FAULT_SIGSEGV;
 
        /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
index 7b36aa7cc89a43c7c5909b7799b77d13106a2929..7f684d5a808738c3c645798bc6da8b110fbb2383 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -778,10 +778,12 @@ again:                    remove_next = 1 + (end > next->vm_end);
                if (exporter && exporter->anon_vma && !importer->anon_vma) {
                        int error;
 
+                       importer->anon_vma = exporter->anon_vma;
                        error = anon_vma_clone(importer, exporter);
-                       if (error)
+                       if (error) {
+                               importer->anon_vma = NULL;
                                return error;
-                       importer->anon_vma = exporter->anon_vma;
+                       }
                }
        }
 
@@ -2099,14 +2101,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start;
+       unsigned long new_start, actual_size;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       actual_size = size;
+       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+               actual_size -= PAGE_SIZE;
+       if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
index d5d81f5384d16f09076fdb4dc06cdf45f6d238ef..6f4335238e33311de251a647fe725d06d5897060 100644 (file)
@@ -1541,16 +1541,6 @@ pause:
                bdi_start_background_writeback(bdi);
 }
 
-void set_page_dirty_balance(struct page *page)
-{
-       if (set_page_dirty(page)) {
-               struct address_space *mapping = page_mapping(page);
-
-               if (mapping)
-                       balance_dirty_pages_ratelimited(mapping);
-       }
-}
-
 static DEFINE_PER_CPU(int, bdp_ratelimits);
 
 /*
@@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied);
  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
  *
- * Most callers have locked the page, which pins the address_space in memory.
- * But zap_pte_range() does not lock the page, however in that case the
- * mapping is pinned by the vma's ->vm_file reference.
- *
- * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() inside tree_lock.
+ * The caller must ensure this doesn't race with truncation.  Most will simply
+ * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
+ * the pte lock held, which also locks out truncation.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
-               struct address_space *mapping2;
                unsigned long flags;
 
                if (!mapping)
                        return 1;
 
                spin_lock_irqsave(&mapping->tree_lock, flags);
-               mapping2 = page_mapping(page);
-               if (mapping2) { /* Race with truncate? */
-                       BUG_ON(mapping2 != mapping);
-                       WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-                       account_page_dirtied(page, mapping);
-                       radix_tree_tag_set(&mapping->page_tree,
-                               page_index(page), PAGECACHE_TAG_DIRTY);
-               }
+               BUG_ON(page_mapping(page) != mapping);
+               WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+               account_page_dirtied(page, mapping);
+               radix_tree_tag_set(&mapping->page_tree, page_index(page),
+                                  PAGECACHE_TAG_DIRTY);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
@@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page)
                /*
                 * We carefully synchronise fault handlers against
                 * installing a dirty pte and marking the page dirty
-                * at this point. We do this by having them hold the
-                * page lock at some point after installing their
-                * pte, but before marking the page dirty.
-                * Pages are always locked coming in here, so we get
-                * the desired exclusion. See mm/memory.c:do_wp_page()
-                * for more comments.
+                * at this point.  We do this by having them hold the
+                * page lock while dirtying the page, and pages are
+                * always locked coming in here, so we get the desired
+                * exclusion.
                 */
                if (TestClearPageDirty(page)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
index 7633c503a116c221e7447614c6d10ebaa38a0b1c..8e20f9c2fa5ab7a89fb29c5dbc3987ccd8690047 100644 (file)
@@ -2332,12 +2332,21 @@ static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, struct zone *preferred_zone,
-       int classzone_idx, int migratetype)
+       int classzone_idx, int migratetype, unsigned long *did_some_progress)
 {
        struct page *page;
 
-       /* Acquire the per-zone oom lock for each zone */
+       *did_some_progress = 0;
+
+       if (oom_killer_disabled)
+               return NULL;
+
+       /*
+        * Acquire the per-zone oom lock for each zone.  If that
+        * fails, somebody else is making progress for us.
+        */
        if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
+               *did_some_progress = 1;
                schedule_timeout_uninterruptible(1);
                return NULL;
        }
@@ -2363,12 +2372,18 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                goto out;
 
        if (!(gfp_mask & __GFP_NOFAIL)) {
+               /* Coredumps can quickly deplete all memory reserves */
+               if (current->flags & PF_DUMPCORE)
+                       goto out;
                /* The OOM killer will not help higher order allocs */
                if (order > PAGE_ALLOC_COSTLY_ORDER)
                        goto out;
                /* The OOM killer does not needlessly kill tasks for lowmem */
                if (high_zoneidx < ZONE_NORMAL)
                        goto out;
+               /* The OOM killer does not compensate for light reclaim */
+               if (!(gfp_mask & __GFP_FS))
+                       goto out;
                /*
                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
@@ -2381,7 +2396,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        }
        /* Exhausted what can be done so it's blamo time */
        out_of_memory(zonelist, gfp_mask, order, nodemask, false);
-
+       *did_some_progress = 1;
 out:
        oom_zonelist_unlock(zonelist, gfp_mask);
        return page;
@@ -2658,7 +2673,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
            (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                goto nopage;
 
-restart:
+retry:
        if (!(gfp_mask & __GFP_NO_KSWAPD))
                wake_all_kswapds(order, zonelist, high_zoneidx,
                                preferred_zone, nodemask);
@@ -2681,7 +2696,6 @@ restart:
                classzone_idx = zonelist_zone_idx(preferred_zoneref);
        }
 
-rebalance:
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2788,54 +2802,28 @@ rebalance:
        if (page)
                goto got_pg;
 
-       /*
-        * If we failed to make any progress reclaiming, then we are
-        * running out of options and have to consider going OOM
-        */
-       if (!did_some_progress) {
-               if (oom_gfp_allowed(gfp_mask)) {
-                       if (oom_killer_disabled)
-                               goto nopage;
-                       /* Coredumps can quickly deplete all memory reserves */
-                       if ((current->flags & PF_DUMPCORE) &&
-                           !(gfp_mask & __GFP_NOFAIL))
-                               goto nopage;
-                       page = __alloc_pages_may_oom(gfp_mask, order,
-                                       zonelist, high_zoneidx,
-                                       nodemask, preferred_zone,
-                                       classzone_idx, migratetype);
-                       if (page)
-                               goto got_pg;
-
-                       if (!(gfp_mask & __GFP_NOFAIL)) {
-                               /*
-                                * The oom killer is not called for high-order
-                                * allocations that may fail, so if no progress
-                                * is being made, there are no other options and
-                                * retrying is unlikely to help.
-                                */
-                               if (order > PAGE_ALLOC_COSTLY_ORDER)
-                                       goto nopage;
-                               /*
-                                * The oom killer is not called for lowmem
-                                * allocations to prevent needlessly killing
-                                * innocent tasks.
-                                */
-                               if (high_zoneidx < ZONE_NORMAL)
-                                       goto nopage;
-                       }
-
-                       goto restart;
-               }
-       }
-
        /* Check if we should retry the allocation */
        pages_reclaimed += did_some_progress;
        if (should_alloc_retry(gfp_mask, order, did_some_progress,
                                                pages_reclaimed)) {
+               /*
+                * If we fail to make progress by freeing individual
+                * pages, but the allocation wants us to keep going,
+                * start OOM killing tasks.
+                */
+               if (!did_some_progress) {
+                       page = __alloc_pages_may_oom(gfp_mask, order, zonelist,
+                                               high_zoneidx, nodemask,
+                                               preferred_zone, classzone_idx,
+                                               migratetype,&did_some_progress);
+                       if (page)
+                               goto got_pg;
+                       if (!did_some_progress)
+                               goto nopage;
+               }
                /* Wait for some write requests to complete then retry */
                wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
-               goto rebalance;
+               goto retry;
        } else {
                /*
                 * High-order allocations do not necessarily loop after
index c5bc241127b205734eaef62964d6d152941174cc..71cd5bd0c17d760c6f6ab1af5991165a1ac05844 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
        anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
        if (anon_vma) {
                atomic_set(&anon_vma->refcount, 1);
+               anon_vma->degree = 1;   /* Reference for first vma */
+               anon_vma->parent = anon_vma;
                /*
                 * Initialise the anon_vma root to point to itself. If called
                 * from fork, the root will be reset to the parents anon_vma.
@@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        anon_vma_chain_link(vma, avc, anon_vma);
+                       /* vma reference or self-parent link for new root */
+                       anon_vma->degree++;
                        allocated = NULL;
                        avc = NULL;
                }
@@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
 /*
  * Attach the anon_vmas from src to dst.
  * Returns 0 on success, -ENOMEM on failure.
+ *
+ * If dst->anon_vma is NULL this function tries to find and reuse existing
+ * anon_vma which has no vmas and only one child anon_vma. This prevents
+ * degradation of anon_vma hierarchy to endless linear chain in case of
+ * constantly forking task. On the other hand, an anon_vma with more than one
+ * child isn't reused even if there was no alive vma, thus rmap walker has a
+ * good chance of avoiding scanning the whole hierarchy when it searches where
+ * page is mapped.
  */
 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
@@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
                anon_vma = pavc->anon_vma;
                root = lock_anon_vma_root(root, anon_vma);
                anon_vma_chain_link(dst, avc, anon_vma);
+
+               /*
+                * Reuse existing anon_vma if its degree lower than two,
+                * that means it has no vma and only one anon_vma child.
+                *
+                * Do not chose parent anon_vma, otherwise first child
+                * will always reuse it. Root anon_vma is never reused:
+                * it has self-parent reference and at least one child.
+                */
+               if (!dst->anon_vma && anon_vma != src->anon_vma &&
+                               anon_vma->degree < 2)
+                       dst->anon_vma = anon_vma;
        }
+       if (dst->anon_vma)
+               dst->anon_vma->degree++;
        unlock_anon_vma_root(root);
        return 0;
 
@@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        if (!pvma->anon_vma)
                return 0;
 
+       /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
+       vma->anon_vma = NULL;
+
        /*
         * First, attach the new VMA to the parent VMA's anon_vmas,
         * so rmap can find non-COWed pages in child processes.
@@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        if (error)
                return error;
 
+       /* An existing anon_vma has been reused, all done then. */
+       if (vma->anon_vma)
+               return 0;
+
        /* Then add our own anon_vma. */
        anon_vma = anon_vma_alloc();
        if (!anon_vma)
@@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
         * lock any of the anon_vmas in this anon_vma tree.
         */
        anon_vma->root = pvma->anon_vma->root;
+       anon_vma->parent = pvma->anon_vma;
        /*
         * With refcounts, an anon_vma can stay around longer than the
         * process it belongs to. The root anon_vma needs to be pinned until
@@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        vma->anon_vma = anon_vma;
        anon_vma_lock_write(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
+       anon_vma->parent->degree++;
        anon_vma_unlock_write(anon_vma);
 
        return 0;
@@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
                 * Leave empty anon_vmas on the list - we'll need
                 * to free them outside the lock.
                 */
-               if (RB_EMPTY_ROOT(&anon_vma->rb_root))
+               if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
+                       anon_vma->parent->degree--;
                        continue;
+               }
 
                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
+       if (vma->anon_vma)
+               vma->anon_vma->degree--;
        unlock_anon_vma_root(root);
 
        /*
@@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma = avc->anon_vma;
 
+               BUG_ON(anon_vma->degree);
                put_anon_vma(anon_vma);
 
                list_del(&avc->same_vma);
index bd9a72bc4a1b81f5b4a53e630360e725f6e1347b..dcd90c891d8e53895d117f219001329e8cdeab46 100644 (file)
@@ -2656,7 +2656,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
         * should make reasonable progress.
         */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
-                                       gfp_mask, nodemask) {
+                                       gfp_zone(gfp_mask), nodemask) {
                if (zone_idx(zone) > ZONE_NORMAL)
                        continue;
 
@@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
                return false;
 
        /*
-        * There is a potential race between when kswapd checks its watermarks
-        * and a process gets throttled. There is also a potential race if
-        * processes get throttled, kswapd wakes, a large process exits therby
-        * balancing the zones that causes kswapd to miss a wakeup. If kswapd
-        * is going to sleep, no process should be sleeping on pfmemalloc_wait
-        * so wake them now if necessary. If necessary, processes will wake
-        * kswapd and get throttled again
+        * The throttled processes are normally woken up in balance_pgdat() as
+        * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+        * race between when kswapd checks the watermarks and a process gets
+        * throttled. There is also a potential race if processes get
+        * throttled, kswapd wakes, a large process exits thereby balancing the
+        * zones, which causes kswapd to exit balance_pgdat() before reaching
+        * the wake up checks. If kswapd is going to sleep, no process should
+        * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+        * the wake up is premature, processes will wake kswapd and get
+        * throttled again. The difference from wake ups in balance_pgdat() is
+        * that here we are under prepare_to_wait().
         */
-       if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
-               wake_up(&pgdat->pfmemalloc_wait);
-               return false;
-       }
+       if (waitqueue_active(&pgdat->pfmemalloc_wait))
+               wake_up_all(&pgdat->pfmemalloc_wait);
 
        return pgdat_balanced(pgdat, order, classzone_idx);
 }
index 90cc2bdd406444df8c122066e0b5f23c33c8ed37..61bf2a06e85d62ce9d319b02f1bf1ebb569e3964 100644 (file)
@@ -9,7 +9,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
 {
        struct sk_buff *skb = *skbp;
        __be16 vlan_proto = skb->vlan_proto;
-       u16 vlan_id = vlan_tx_tag_get_id(skb);
+       u16 vlan_id = skb_vlan_tag_get_id(skb);
        struct net_device *vlan_dev;
        struct vlan_pcpu_stats *rx_stats;
 
index 8ac8a5cc214331253e591fe26f6c3b39a00d5023..c92b52f37d38de143022f172881dd03f076b0194 100644 (file)
@@ -238,6 +238,13 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+static struct net *vlan_get_link_net(const struct net_device *dev)
+{
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+       return dev_net(real_dev);
+}
+
 struct rtnl_link_ops vlan_link_ops __read_mostly = {
        .kind           = "vlan",
        .maxtype        = IFLA_VLAN_MAX,
@@ -250,6 +257,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
        .dellink        = unregister_vlan_dev,
        .get_size       = vlan_get_size,
        .fill_info      = vlan_fill_info,
+       .get_link_net   = vlan_get_link_net,
 };
 
 int __init vlan_netlink_init(void)
index 11660a3aab5ae0b7e0f3f2a111c6e6ab2081de59..c6fc8f756c9aa500d1efa61de95b5757d2dccee8 100644 (file)
@@ -62,6 +62,7 @@ config BATMAN_ADV_MCAST
 config BATMAN_ADV_DEBUG
        bool "B.A.T.M.A.N. debugging"
        depends on BATMAN_ADV
+       depends on DEBUG_FS
        help
          This is an option for use by developers; most people should
          say N here. This enables compilation of support for
index 1e8053976e83dd8bcb03ee37b029fe6f7140d3e5..00e00e09b0003ea80585151d854bc8b4559aee50 100644 (file)
@@ -26,9 +26,8 @@
 #include "bat_algo.h"
 #include "network-coding.h"
 
-
 /**
- * batadv_dup_status - duplicate status
+ * enum batadv_dup_status - duplicate status
  * @BATADV_NO_DUP: the packet is a duplicate
  * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
  *  neighbor)
@@ -517,7 +516,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_len: (total) length of the OGM
  * @send_time: timestamp (jiffies) when the packet is to be sent
- * @direktlink: true if this is a direct link packet
+ * @directlink: true if this is a direct link packet
  * @if_incoming: interface where the packet was received
  * @if_outgoing: interface for which the retransmission should be considered
  * @forw_packet: the forwarded packet which should be checked
@@ -879,7 +878,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
                        word_index = hard_iface->if_num * BATADV_NUM_WORDS;
-                       word = &(orig_node->bat_iv.bcast_own[word_index]);
+                       word = &orig_node->bat_iv.bcast_own[word_index];
 
                        batadv_bit_get_packet(bat_priv, word, 1, 0);
                        if_num = hard_iface->if_num;
@@ -1362,10 +1361,10 @@ out:
        return ret;
 }
 
-
 /**
  * batadv_iv_ogm_process_per_outif - process a batman iv OGM for an outgoing if
  * @skb: the skb containing the OGM
+ * @ogm_offset: offset from skb->data to start of ogm header
  * @orig_node: the (cached) orig node for the originator of this OGM
  * @if_incoming: the interface where this packet was received
  * @if_outgoing: the interface for which the packet should be considered
@@ -1664,7 +1663,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
                        offset = if_num * BATADV_NUM_WORDS;
 
                        spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
-                       word = &(orig_neigh_node->bat_iv.bcast_own[offset]);
+                       word = &orig_neigh_node->bat_iv.bcast_own[offset];
                        bit_pos = if_incoming_seqno - 2;
                        bit_pos -= ntohl(ogm_packet->seqno);
                        batadv_set_bit(word, bit_pos);
@@ -1902,10 +1901,10 @@ out:
  * batadv_iv_ogm_neigh_is_eob - check if neigh1 is equally good or better than
  *  neigh2 from the metric prospective
  * @neigh1: the first neighbor object of the comparison
- * @if_outgoing: outgoing interface for the first neighbor
+ * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
  * @if_outgoing2: outgoing interface for the second neighbor
-
+ *
  * Returns true if the metric via neigh1 is equally good or better than
  * the metric via neigh2, false otherwise.
  */
index 9586750022f506d2dd6656d8b38f709dc9ea11a3..e3da07a64026ed0552799fb954f512f7e7c013b3 100644 (file)
@@ -29,7 +29,6 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
        bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
 }
 
-
 /* receive and process one packet within the sequence number window.
  *
  * returns:
index cc2407351d367f74fe9f288fdff79fe8db95f7c3..2acaafe60188711ad5f2d704b99c00435b0ee40b 100644 (file)
@@ -29,8 +29,7 @@ static inline int batadv_test_bit(const unsigned long *seq_bits,
        diff = last_seqno - curr_seqno;
        if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
                return 0;
-       else
-               return test_bit(diff, seq_bits) != 0;
+       return test_bit(diff, seq_bits) != 0;
 }
 
 /* turn corresponding bit on, so we can remember that we got the packet */
index a957c8140721def2878b292c2e61e2dd37c36e5c..ac4b96eccadeb2b055de46fcf48a7aa8b6da1350 100644 (file)
@@ -69,7 +69,6 @@ static inline uint32_t batadv_choose_backbone_gw(const void *data,
        return hash % size;
 }
 
-
 /* compares address and vid of two backbone gws */
 static int batadv_compare_backbone_gw(const struct hlist_node *node,
                                      const void *data2)
@@ -245,14 +244,14 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
                spin_unlock_bh(list_lock);
        }
 
-       /* all claims gone, intialize CRC */
+       /* all claims gone, initialize CRC */
        backbone_gw->crc = BATADV_BLA_CRC_INIT;
 }
 
 /**
  * batadv_bla_send_claim - sends a claim frame according to the provided info
  * @bat_priv: the bat priv with all the soft interface information
- * @orig: the mac address to be announced within the claim
+ * @mac: the mac address to be announced within the claim
  * @vid: the VLAN ID
  * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
  */
@@ -364,6 +363,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
  * @vid: the VLAN ID
+ * @own_backbone: set if the requested backbone is local
  *
  * searches for the backbone gw or creates a new one if it could not
  * be found.
@@ -454,6 +454,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 /**
  * batadv_bla_answer_request - answer a bla request by sending own claims
  * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: interface where the request came on
  * @vid: the vid where the request came on
  *
  * Repeat all of our own claims, and finally send an ANNOUNCE frame
@@ -660,7 +661,6 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
        if (unlikely(!backbone_gw))
                return 1;
 
-
        /* handle as ANNOUNCE frame */
        backbone_gw->lasttime = jiffies;
        crc = ntohs(*((__be16 *)(&an_addr[4])));
@@ -775,6 +775,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
 /**
  * batadv_check_claim_group
  * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary interface of this batman interface
  * @hw_src: the Hardware source in the ARP Header
  * @hw_dst: the Hardware destination in the ARP Header
  * @ethhdr: pointer to the Ethernet header of the claim frame
@@ -846,10 +847,10 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
        return 2;
 }
 
-
 /**
  * batadv_bla_process_claim
  * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
  *
  * Check if this is a claim frame, and process it accordingly.
@@ -1327,7 +1328,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                goto out;
        }
        /* not found, add a new entry (overwrite the oldest entry)
-        * and allow it, its the first occurence.
+        * and allow it, its the first occurrence.
         */
        curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
        curr %= BATADV_DUPLIST_SIZE;
@@ -1343,8 +1344,6 @@ out:
        return ret;
 }
 
-
-
 /**
  * batadv_bla_is_backbone_gw_orig
  * @bat_priv: the bat priv with all the soft interface information
@@ -1386,7 +1385,6 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
        return false;
 }
 
-
 /**
  * batadv_bla_is_backbone_gw
  * @skb: the frame to be checked
@@ -1476,7 +1474,6 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto allow;
 
-
        if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
index a12e25efaf6ff055094f843c7c5536ce861f593a..a4972874c056d3b9447e5ecea4906483b0839628 100644 (file)
@@ -233,7 +233,6 @@ static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
 
 static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 {
-       return;
 }
 #endif
 
@@ -405,6 +404,7 @@ struct batadv_debuginfo batadv_hardif_debuginfo_##_name = { \
                .release = single_release,                      \
        },                                                      \
 }
+
 static BATADV_HARDIF_DEBUGINFO(originators, S_IRUGO,
                               batadv_originators_hardif_open);
 
index b5981113c9a77a3d7546246dcec813545e6afdbf..aad022dd15df5408aaffe6f80c7e17baca2d0002 100644 (file)
@@ -1100,6 +1100,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
        batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
        batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
 }
+
 /**
  * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
  * DAT storage only
index d76e1d06c5b53019230aee93ba39730ab60987e1..2fe0764c64be80c2598b12b7f00933c36221838e 100644 (file)
@@ -25,9 +25,7 @@
 
 #include <linux/if_arp.h>
 
-/**
- * BATADV_DAT_ADDR_MAX - maximum address value in the DHT space
- */
+/* BATADV_DAT_ADDR_MAX - maximum address value in the DHT space */
 #define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
 
 void batadv_dat_status_update(struct net_device *net_dev);
index 00f9e144cc97b1afe7cefc5b682524103799e4b7..3d1dcaa3e8b5af2040ec1cab987d98be876f35f8 100644 (file)
@@ -23,7 +23,6 @@
 #include "hard-interface.h"
 #include "soft-interface.h"
 
-
 /**
  * batadv_frag_clear_chain - delete entries in the fragment buffer chain
  * @head: head of chain with entries.
index 5d7a0e66a22b35d54baf5910f8b923981d11c0b5..d848cf6676a2e4b7853a263f496d1ff0a0260a86 100644 (file)
@@ -41,8 +41,7 @@ batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
        if (!hlist_empty(&frags_entry->head) &&
            batadv_has_timed_out(frags_entry->timestamp, BATADV_FRAG_TIMEOUT))
                return true;
-       else
-               return false;
+       return false;
 }
 
 #endif /* _NET_BATMAN_ADV_FRAGMENTATION_H_ */
index e0bcf9e842737427e8dc6d8f5e1be57f92252933..27649e85f3f666b6131ae4becf367ce3a7b108d8 100644 (file)
@@ -775,6 +775,7 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
 
        return ret;
 }
+
 /**
  * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
  * @bat_priv: the bat priv with all the soft interface information
index d1183e882167c3cd75aa842c94c612818e250d30..12fc77bef23faf0313a551ec268c836891171813 100644 (file)
@@ -41,7 +41,6 @@
 #include "network-coding.h"
 #include "fragmentation.h"
 
-
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
  * list traversals just rcu-locked
  */
@@ -403,6 +402,9 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                goto err_free;
        }
 
+       /* reset control block to avoid left overs from previous users */
+       memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
+
        /* all receive handlers return whether they received or reused
         * the supplied skb. if not, we have to free the skb.
         */
@@ -651,7 +653,7 @@ static struct batadv_tvlv_handler
 /**
  * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
  *  possibly free it
- * @tvlv_handler: the tvlv container to free
+ * @tvlv: the tvlv container to free
  */
 static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
 {
@@ -796,11 +798,11 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accomodate
+ * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
  *  requested packet size
  * @packet_buff: packet buffer
  * @packet_buff_len: packet buffer size
- * @packet_min_len: requested packet minimum size
+ * @min_packet_len: requested packet minimum size
  * @additional_packet_len: requested additional packet size on top of minimum
  *  size
  *
index a1fcd884f0b12845a983f43a6057f130f6cdf1e7..4d2318829a3420582aab6963535a1ba51a3d7087 100644 (file)
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2014.4.0"
+#define BATADV_SOURCE_VERSION "2015.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -92,9 +92,8 @@
 /* numbers of originator to contact for any PUT/GET DHT operation */
 #define BATADV_DAT_CANDIDATES_NUM 3
 
-/**
- * BATADV_TQ_SIMILARITY_THRESHOLD - TQ points that a secondary metric can differ
- *  at most from the primary one in order to be still considered acceptable
+/* BATADV_TQ_SIMILARITY_THRESHOLD - TQ points that a secondary metric can differ
+ * at most from the primary one in order to be still considered acceptable
  */
 #define BATADV_TQ_SIMILARITY_THRESHOLD 50
 
@@ -313,10 +312,10 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
  *  - when adding 128 - it is neither a predecessor nor a successor,
  *  - after adding more than 127 to the starting value - it is a successor
  */
-#define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \
-                                typeof(y) _d2 = (y); \
-                                typeof(x) _dummy = (_d1 - _d2); \
-                                (void) (&_d1 == &_d2); \
+#define batadv_seq_before(x, y) ({typeof(x)_d1 = (x); \
+                                typeof(y)_d2 = (y); \
+                                typeof(x)_dummy = (_d1 - _d2); \
+                                (void)(&_d1 == &_d2); \
                                 _dummy > batadv_smallest_signed_int(_dummy); })
 #define batadv_seq_after(x, y) batadv_seq_before(y, x)
 
index ab6bb2af1d45d51a77b93a062b8a5b59cc69b1c0..b24e4bb64fb5fd51c813df7e5801b98d27caf959 100644 (file)
@@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
                if (orig_initialized)
                        atomic_dec(&bat_priv->mcast.num_disabled);
                orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
-       /* If mcast support is being switched off increase the disabled
-        * mcast node counter.
+       /* If mcast support is being switched off or if this is an initial
+        * OGM without mcast support then increase the disabled mcast
+        * node counter.
         */
        } else if (!orig_mcast_enabled &&
-                  orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) {
+                  (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+                   !orig_initialized)) {
                atomic_inc(&bat_priv->mcast.num_disabled);
                orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
        }
@@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
 {
        struct batadv_priv *bat_priv = orig->bat_priv;
 
-       if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST))
+       if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
+           orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
                atomic_dec(&bat_priv->mcast.num_disabled);
 
        batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
index 73b5d45819c1b0060f53ed4ac2cdd466206bf37c..3a44ebdb43cba601b34e932a4d752baa33ee2f0d 100644 (file)
@@ -50,7 +50,6 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
 
 static inline void batadv_mcast_mla_update(struct batadv_priv *bat_priv)
 {
-       return;
 }
 
 static inline enum batadv_forw_mode
@@ -67,12 +66,10 @@ static inline int batadv_mcast_init(struct batadv_priv *bat_priv)
 
 static inline void batadv_mcast_free(struct batadv_priv *bat_priv)
 {
-       return;
 }
 
 static inline void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node)
 {
-       return;
 }
 
 #endif /* CONFIG_BATMAN_ADV_MCAST */
index 8d04d174669ed29c467436d33b099ad5d0ca13c4..127cc4d7380a1e7186aee6cc9064199ef48ff9bb 100644 (file)
@@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
        if (!bat_priv->nc.decoding_hash)
                goto err;
 
-       batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
+       batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
                                   &batadv_nc_decoding_hash_lock_class_key);
 
        INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
@@ -1212,8 +1212,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb,
 {
        if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
                return false;
-       else
-               return true;
+       return true;
 }
 
 /**
index 6a484514cd3e98b9e0b27a924b4dcb92f2682055..90e805aba3795dd2ad08d5c29f06b8d925351275 100644 (file)
@@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 
        batadv_frag_purge_orig(orig_node, NULL);
 
-       batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
-                                 "originator timed out");
-
        if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
                orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
 
@@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
+       orig_node->last_seen = jiffies;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
 #ifdef CONFIG_BATMAN_ADV_MCAST
@@ -799,7 +797,6 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
        return ifinfo_purged;
 }
 
-
 /**
  * batadv_purge_orig_neighbors - purges neighbors from originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -977,6 +974,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
                                batadv_gw_node_delete(bat_priv, orig_node);
                                hlist_del_rcu(&orig_node->hash_entry);
+                               batadv_tt_global_del_orig(orig_node->bat_priv,
+                                                         orig_node, -1,
+                                                         "originator timed out");
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }
index db3a9ed734cb7c858c28d00e53250fd22d15f828..aa4a4369629569840e310acc2c6e6619f49a5768 100644 (file)
@@ -70,7 +70,6 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
                          unsigned short vid);
 void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan);
 
-
 /* hashfunction to choose an entry in a hash table of given size
  * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
  */
index 34e096d2dce1592dcb330daaf5b6c030de32cb93..b81fbbf21a6393af841ababdb845ba6179d79ca1 100644 (file)
@@ -198,6 +198,7 @@ struct batadv_bla_claim_dst {
        uint8_t type;           /* bla_claimframe */
        __be16 group;           /* group id */
 };
+
 #pragma pack()
 
 /**
@@ -376,7 +377,7 @@ struct batadv_frag_packet {
        uint8_t reserved:4;
        uint8_t no:4;
 #else
-#error "unknown bitfield endianess"
+#error "unknown bitfield endianness"
 #endif
        uint8_t dest[ETH_ALEN];
        uint8_t orig[ETH_ALEN];
@@ -452,7 +453,7 @@ struct batadv_coded_packet {
  * @src: address of the source
  * @dst: address of the destination
  * @tvlv_len: length of tvlv data following the unicast tvlv header
- * @align: 2 bytes to align the header to a 4 byte boundry
+ * @align: 2 bytes to align the header to a 4 byte boundary
  */
 struct batadv_unicast_tvlv_packet {
        uint8_t  packet_type;
index 35f76f2f7824b8c2756e60bb5249fd4f5502cc8d..da83982bf974c5d84d8046b7b415b4f2f1b07cb9 100644 (file)
@@ -292,7 +292,6 @@ out:
        return ret;
 }
 
-
 int batadv_recv_icmp_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if)
 {
@@ -443,11 +442,13 @@ batadv_find_router(struct batadv_priv *bat_priv,
 
        router = batadv_orig_router_get(orig_node, recv_if);
 
+       if (!router)
+               return router;
+
        /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
         * and if activated.
         */
-       if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) ||
-           !router)
+       if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
                return router;
 
        /* bonding: loop through the list of possible routers found
@@ -455,7 +456,7 @@ batadv_find_router(struct batadv_priv *bat_priv,
         * the last chosen bonding candidate (next_candidate). If no such
         * router is found, use the first candidate found (the previously
         * chosen bonding candidate might have been the last one in the list).
-        * If this can't be found either, return the previously choosen
+        * If this can't be found either, return the previously chosen
         * router - obviously there are no other candidates.
         */
        rcu_read_lock();
index 5467955eb27c6c3e069db480f8a0ebc97fcaf107..5ec31d7de24f17704b5f62d3bbabfc6101d720ae 100644 (file)
@@ -36,7 +36,6 @@
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
 
-
 static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
 static void batadv_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info);
index f40cb0436eba1ece028c9852d5f69b9c504f62b4..a75dc12f96f8c75432ad1c709e6813748bf7506b 100644 (file)
@@ -151,7 +151,6 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                   \
        static BATADV_ATTR(_name, _mode, batadv_show_##_name,           \
                           batadv_store_##_name)
 
-
 #define BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)      \
 ssize_t batadv_store_##_name(struct kobject *kobj,                     \
                             struct attribute *attr, char *buff,        \
index 5f59e7f899a0179a544764207468c6b4b336a237..07b263a437d1b2488d0e882696669593df23eb62 100644 (file)
@@ -1780,7 +1780,6 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
                                             orig_node, message);
 
-
 out:
        if (tt_global_entry)
                batadv_tt_global_entry_free_ref(tt_global_entry);
@@ -2769,9 +2768,8 @@ static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
 {
        if (batadv_is_my_mac(bat_priv, req_dst))
                return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
-       else
-               return batadv_send_other_tt_response(bat_priv, tt_data,
-                                                    req_src, req_dst);
+       return batadv_send_other_tt_response(bat_priv, tt_data, req_src,
+                                            req_dst);
 }
 
 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
@@ -2854,7 +2852,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
 /**
  * batadv_is_my_client - check if a client is served by the local node
  * @bat_priv: the bat priv with all the soft interface information
- * @addr: the mac adress of the client to check
+ * @addr: the mac address of the client to check
  * @vid: VLAN identifier
  *
  * Returns true if the client is served by this node, false otherwise.
index 8854c05622a9bae2b8f30b0cf91686e8c629a849..9398c3fb417472962ddb58b0344de56006006b85 100644 (file)
@@ -199,7 +199,6 @@ struct batadv_orig_bat_iv {
 /**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
  * @orig: originator ethernet address
- * @primary_addr: hosts primary interface address
  * @ifinfo_list: list for routers per outgoing interface
  * @last_bonding_candidate: pointer to last ifinfo of last used router
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
@@ -244,7 +243,6 @@ struct batadv_orig_bat_iv {
  */
 struct batadv_orig_node {
        uint8_t orig[ETH_ALEN];
-       uint8_t primary_addr[ETH_ALEN];
        struct hlist_head ifinfo_list;
        struct batadv_orig_ifinfo *last_bonding_candidate;
 #ifdef CONFIG_BATMAN_ADV_DAT
@@ -970,7 +968,7 @@ struct batadv_tt_orig_list_entry {
 };
 
 /**
- * struct batadv_tt_change_node - structure for tt changes occured
+ * struct batadv_tt_change_node - structure for tt changes occurred
  * @list: list node for batadv_priv_tt::changes_list
  * @change: holds the actual translation table diff data
  */
index c989253737f05985e214fbf2f272a69412f9a0fb..1742b849fcff7ca6d5bb9fbe8b375c2514992531 100644 (file)
@@ -31,7 +31,7 @@
 
 #define VERSION "0.1"
 
-static struct dentry *lowpan_psm_debugfs;
+static struct dentry *lowpan_enable_debugfs;
 static struct dentry *lowpan_control_debugfs;
 
 #define IFACE_NAME_TEMPLATE "bt%d"
@@ -55,11 +55,7 @@ struct skb_cb {
 static LIST_HEAD(bt_6lowpan_devices);
 static DEFINE_SPINLOCK(devices_lock);
 
-/* If psm is set to 0 (default value), then 6lowpan is disabled.
- * Other values are used to indicate a Protocol Service Multiplexer
- * value for 6lowpan.
- */
-static u16 psm_6lowpan;
+static bool enable_6lowpan;
 
 /* We are listening incoming connections via this channel
  */
@@ -761,7 +757,7 @@ static bool is_bt_6lowpan(struct hci_conn *hcon)
        if (hcon->type != LE_LINK)
                return false;
 
-       if (!psm_6lowpan)
+       if (!enable_6lowpan)
                return false;
 
        return true;
@@ -1085,7 +1081,7 @@ static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
        if (!pchan)
                return -EINVAL;
 
-       err = l2cap_chan_connect(pchan, cpu_to_le16(psm_6lowpan), 0,
+       err = l2cap_chan_connect(pchan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
                                 addr, dst_type);
 
        BT_DBG("chan %p err %d", pchan, err);
@@ -1118,7 +1114,7 @@ static struct l2cap_chan *bt_6lowpan_listen(void)
        struct l2cap_chan *pchan;
        int err;
 
-       if (psm_6lowpan == 0)
+       if (!enable_6lowpan)
                return NULL;
 
        pchan = chan_get();
@@ -1130,10 +1126,9 @@ static struct l2cap_chan *bt_6lowpan_listen(void)
 
        atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT);
 
-       BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan,
-              pchan->src_type);
+       BT_DBG("chan %p src type %d", pchan, pchan->src_type);
 
-       err = l2cap_add_psm(pchan, addr, cpu_to_le16(psm_6lowpan));
+       err = l2cap_add_psm(pchan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
        if (err) {
                l2cap_chan_put(pchan);
                BT_ERR("psm cannot be added err %d", err);
@@ -1219,22 +1214,23 @@ static void disconnect_all_peers(void)
        spin_unlock(&devices_lock);
 }
 
-struct set_psm {
+struct set_enable {
        struct work_struct work;
-       u16 psm;
+       bool flag;
 };
 
-static void do_psm_set(struct work_struct *work)
+static void do_enable_set(struct work_struct *work)
 {
-       struct set_psm *set_psm = container_of(work, struct set_psm, work);
+       struct set_enable *set_enable = container_of(work,
+                                                    struct set_enable, work);
 
-       if (set_psm->psm == 0 || psm_6lowpan != set_psm->psm)
+       if (!set_enable->flag || enable_6lowpan != set_enable->flag)
                /* Disconnect existing connections if 6lowpan is
-                * disabled (psm = 0), or if psm changes.
+                * disabled
                 */
                disconnect_all_peers();
 
-       psm_6lowpan = set_psm->psm;
+       enable_6lowpan = set_enable->flag;
 
        if (listen_chan) {
                l2cap_chan_close(listen_chan, 0);
@@ -1243,33 +1239,33 @@ static void do_psm_set(struct work_struct *work)
 
        listen_chan = bt_6lowpan_listen();
 
-       kfree(set_psm);
+       kfree(set_enable);
 }
 
-static int lowpan_psm_set(void *data, u64 val)
+static int lowpan_enable_set(void *data, u64 val)
 {
-       struct set_psm *set_psm;
+       struct set_enable *set_enable;
 
-       set_psm = kzalloc(sizeof(*set_psm), GFP_KERNEL);
-       if (!set_psm)
+       set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
+       if (!set_enable)
                return -ENOMEM;
 
-       set_psm->psm = val;
-       INIT_WORK(&set_psm->work, do_psm_set);
+       set_enable->flag = !!val;
+       INIT_WORK(&set_enable->work, do_enable_set);
 
-       schedule_work(&set_psm->work);
+       schedule_work(&set_enable->work);
 
        return 0;
 }
 
-static int lowpan_psm_get(void *data, u64 *val)
+static int lowpan_enable_get(void *data, u64 *val)
 {
-       *val = psm_6lowpan;
+       *val = enable_6lowpan;
        return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(lowpan_psm_fops, lowpan_psm_get,
-                       lowpan_psm_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
+                       lowpan_enable_set, "%llu\n");
 
 static ssize_t lowpan_control_write(struct file *fp,
                                    const char __user *user_buffer,
@@ -1439,9 +1435,9 @@ static struct notifier_block bt_6lowpan_dev_notifier = {
 
 static int __init bt_6lowpan_init(void)
 {
-       lowpan_psm_debugfs = debugfs_create_file("6lowpan_psm", 0644,
-                                                bt_debugfs, NULL,
-                                                &lowpan_psm_fops);
+       lowpan_enable_debugfs = debugfs_create_file("6lowpan_enable", 0644,
+                                                   bt_debugfs, NULL,
+                                                   &lowpan_enable_fops);
        lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
                                                     bt_debugfs, NULL,
                                                     &lowpan_control_fops);
@@ -1451,7 +1447,7 @@ static int __init bt_6lowpan_init(void)
 
 static void __exit bt_6lowpan_exit(void)
 {
-       debugfs_remove(lowpan_psm_debugfs);
+       debugfs_remove(lowpan_enable_debugfs);
        debugfs_remove(lowpan_control_debugfs);
 
        if (listen_chan) {
index ce82722d049b7c013fd06f66f726fbbf75a01f56..05f57e491ccbd614a1d306c49df891e4a2ec00c6 100644 (file)
@@ -511,13 +511,12 @@ static int bnep_session(void *arg)
 
 static struct device *bnep_get_device(struct bnep_session *session)
 {
-       struct hci_conn *conn;
+       struct l2cap_conn *conn = l2cap_pi(session->sock->sk)->chan->conn;
 
-       conn = l2cap_pi(session->sock->sk)->chan->conn->hcon;
-       if (!conn)
+       if (!conn || !conn->hcon)
                return NULL;
 
-       return &conn->dev;
+       return &conn->hcon->dev;
 }
 
 static struct device_type bnep_type = {
index 1ca8a87a078776d75d223a204aa4f42770c229a3..75bd2c42e3e791024abf9d4014fbc41d12dea0da 100644 (file)
@@ -253,8 +253,6 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
                        if (skb->len < CAPI_MSG_BASELEN + 15)
                                break;
 
-                       controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 10);
-
                        if (!info && ctrl) {
                                int len = min_t(uint, CAPI_MANUFACTURER_LEN,
                                                skb->data[CAPI_MSG_BASELEN + 14]);
@@ -270,8 +268,6 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
                        if (skb->len < CAPI_MSG_BASELEN + 32)
                                break;
 
-                       controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12);
-
                        if (!info && ctrl) {
                                ctrl->version.majorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 16);
                                ctrl->version.minorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 20);
@@ -285,8 +281,6 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
                        if (skb->len < CAPI_MSG_BASELEN + 17)
                                break;
 
-                       controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12);
-
                        if (!info && ctrl) {
                                int len = min_t(uint, CAPI_SERIAL_LEN,
                                                skb->data[CAPI_MSG_BASELEN + 16]);
index 75240aaca101898755680f4e97a69e8bb29119f3..c9b8fa544785df83d1afa02629b6be00a7325934 100644 (file)
@@ -633,7 +633,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
        mgmt_reenable_advertising(hdev);
 }
 
-static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
+static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct hci_conn *conn;
 
@@ -1084,21 +1084,6 @@ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
 }
 EXPORT_SYMBOL(hci_conn_check_secure);
 
-/* Change link key */
-int hci_conn_change_link_key(struct hci_conn *conn)
-{
-       BT_DBG("hcon %p", conn);
-
-       if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
-               struct hci_cp_change_conn_link_key cp;
-               cp.handle = cpu_to_le16(conn->handle);
-               hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
-                            sizeof(cp), &cp);
-       }
-
-       return 0;
-}
-
 /* Switch role */
 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
 {
index 5ef5221c1813fc43804b7d0e9677e792e3884f6c..3322d3f4c85a25eb4bed8dbfaa1802b907b3f1d3 100644 (file)
@@ -141,7 +141,7 @@ static const struct file_operations dut_mode_fops = {
 
 /* ---- HCI requests ---- */
 
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
 {
        BT_DBG("%s result 0x%2.2x", hdev->name, result);
 
@@ -497,43 +497,6 @@ static void le_setup(struct hci_request *req)
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
 }
 
-static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
-{
-       if (lmp_ext_inq_capable(hdev))
-               return 0x02;
-
-       if (lmp_inq_rssi_capable(hdev))
-               return 0x01;
-
-       if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
-           hdev->lmp_subver == 0x0757)
-               return 0x01;
-
-       if (hdev->manufacturer == 15) {
-               if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
-                       return 0x01;
-               if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
-                       return 0x01;
-               if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
-                       return 0x01;
-       }
-
-       if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
-           hdev->lmp_subver == 0x1805)
-               return 0x01;
-
-       return 0x00;
-}
-
-static void hci_setup_inquiry_mode(struct hci_request *req)
-{
-       u8 mode;
-
-       mode = hci_get_inquiry_mode(req->hdev);
-
-       hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
-}
-
 static void hci_setup_event_mask(struct hci_request *req)
 {
        struct hci_dev *hdev = req->hdev;
@@ -646,6 +609,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
+
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
@@ -658,8 +622,18 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
                }
        }
 
-       if (lmp_inq_rssi_capable(hdev))
-               hci_setup_inquiry_mode(req);
+       if (lmp_inq_rssi_capable(hdev) ||
+           test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
+               u8 mode;
+
+               /* If Extended Inquiry Result events are supported, then
+                * they are clearly preferred over Inquiry Result with RSSI
+                * events.
+                */
+               mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
+
+               hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+       }
 
        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
@@ -758,27 +732,12 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
 
        hci_setup_event_mask(req);
 
-       /* Some Broadcom based Bluetooth controllers do not support the
-        * Delete Stored Link Key command. They are clearly indicating its
-        * absence in the bit mask of supported commands.
-        *
-        * Check the supported commands and only if the the command is marked
-        * as supported send it. If not supported assume that the controller
-        * does not have actual support for stored link keys which makes this
-        * command redundant anyway.
-        *
-        * Some controllers indicate that they support handling deleting
-        * stored link keys, but they don't. The quirk lets a driver
-        * just disable this command.
-        */
-       if (hdev->commands[6] & 0x80 &&
-           !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
-               struct hci_cp_delete_stored_link_key cp;
+       if (hdev->commands[6] & 0x20) {
+               struct hci_cp_read_stored_link_key cp;
 
                bacpy(&cp.bdaddr, BDADDR_ANY);
-               cp.delete_all = 0x01;
-               hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
-                           sizeof(cp), &cp);
+               cp.read_all = 0x01;
+               hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }
 
        if (hdev->commands[5] & 0x10)
@@ -872,6 +831,29 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
 {
        struct hci_dev *hdev = req->hdev;
 
+       /* Some Broadcom based Bluetooth controllers do not support the
+        * Delete Stored Link Key command. They are clearly indicating its
+        * absence in the bit mask of supported commands.
+        *
+        * Check the supported commands and only if the the command is marked
+        * as supported send it. If not supported assume that the controller
+        * does not have actual support for stored link keys which makes this
+        * command redundant anyway.
+        *
+        * Some controllers indicate that they support handling deleting
+        * stored link keys, but they don't. The quirk lets a driver
+        * just disable this command.
+        */
+       if (hdev->commands[6] & 0x80 &&
+           !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
+               struct hci_cp_delete_stored_link_key cp;
+
+               bacpy(&cp.bdaddr, BDADDR_ANY);
+               cp.delete_all = 0x01;
+               hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
+                           sizeof(cp), &cp);
+       }
+
        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);
@@ -889,8 +871,10 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
 
        /* Enable Secure Connections if supported and configured */
-       if (bredr_sc_enabled(hdev)) {
+       if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+           bredr_sc_enabled(hdev)) {
                u8 support = 0x01;
+
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
@@ -931,10 +915,20 @@ static int __hci_init(struct hci_dev *hdev)
        if (err < 0)
                return err;
 
-       /* Only create debugfs entries during the initial setup
-        * phase and not every time the controller gets powered on.
+       /* This function is only called when the controller is actually in
+        * configured state. When the controller is marked as unconfigured,
+        * this initialization procedure is not run.
+        *
+        * It means that it is possible that a controller runs through its
+        * setup phase and then discovers missing settings. If that is the
+        * case, then this function will not be called. It then will only
+        * be called during the config phase.
+        *
+        * So only when in setup phase or config phase, create the debugfs
+        * entries and register the SMP channels.
         */
-       if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+       if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+           !test_bit(HCI_CONFIG, &hdev->dev_flags))
                return 0;
 
        hci_debugfs_create_common(hdev);
@@ -942,10 +936,8 @@ static int __hci_init(struct hci_dev *hdev)
        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);
 
-       if (lmp_le_capable(hdev)) {
+       if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);
-               smp_register(hdev);
-       }
 
        return 0;
 }
@@ -1625,6 +1617,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
                cancel_delayed_work(&hdev->service_cache);
 
        cancel_delayed_work_sync(&hdev->le_scan_disable);
+       cancel_delayed_work_sync(&hdev->le_scan_restart);
 
        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->rpa_expired);
@@ -1636,6 +1629,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        hci_dev_lock(hdev);
 
+       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                if (hdev->dev_type == HCI_BREDR)
                        mgmt_powered(hdev, 0);
@@ -1646,6 +1641,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);
 
+       smp_unregister(hdev);
+
        hci_notify(hdev, HCI_DEV_DOWN);
 
        if (hdev->flush)
@@ -1725,32 +1722,14 @@ done:
        return err;
 }
 
-int hci_dev_reset(__u16 dev)
+static int hci_dev_do_reset(struct hci_dev *hdev)
 {
-       struct hci_dev *hdev;
-       int ret = 0;
+       int ret;
 
-       hdev = hci_dev_get(dev);
-       if (!hdev)
-               return -ENODEV;
+       BT_DBG("%s %p", hdev->name, hdev);
 
        hci_req_lock(hdev);
 
-       if (!test_bit(HCI_UP, &hdev->flags)) {
-               ret = -ENETDOWN;
-               goto done;
-       }
-
-       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
-               ret = -EBUSY;
-               goto done;
-       }
-
-       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
-               ret = -EOPNOTSUPP;
-               goto done;
-       }
-
        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
@@ -1773,12 +1752,41 @@ int hci_dev_reset(__u16 dev)
 
        ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 
-done:
        hci_req_unlock(hdev);
-       hci_dev_put(hdev);
        return ret;
 }
 
+int hci_dev_reset(__u16 dev)
+{
+       struct hci_dev *hdev;
+       int err;
+
+       hdev = hci_dev_get(dev);
+       if (!hdev)
+               return -ENODEV;
+
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = -ENETDOWN;
+               goto done;
+       }
+
+       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               err = -EBUSY;
+               goto done;
+       }
+
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
+       err = hci_dev_do_reset(hdev);
+
+done:
+       hci_dev_put(hdev);
+       return err;
+}
+
 int hci_dev_reset_stat(__u16 dev)
 {
        struct hci_dev *hdev;
@@ -2144,6 +2152,24 @@ static void hci_power_off(struct work_struct *work)
        hci_dev_do_close(hdev);
 }
 
+static void hci_error_reset(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
+
+       BT_DBG("%s", hdev->name);
+
+       if (hdev->hw_error)
+               hdev->hw_error(hdev, hdev->hw_error_code);
+       else
+               BT_ERR("%s hardware error 0x%2.2x", hdev->name,
+                      hdev->hw_error_code);
+
+       if (hci_dev_do_close(hdev))
+               return;
+
+       hci_dev_do_open(hdev);
+}
+
 static void hci_discov_off(struct work_struct *work)
 {
        struct hci_dev *hdev;
@@ -2556,9 +2582,15 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
+               if (hash256 && rand256)
+                       data->present = 0x03;
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
+               if (hash256 && rand256)
+                       data->present = 0x02;
+               else
+                       data->present = 0x00;
        }
 
        if (hash256 && rand256) {
@@ -2567,6 +2599,8 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
+               if (hash192 && rand192)
+                       data->present = 0x01;
        }
 
        BT_DBG("%s for %pMR", hdev->name, bdaddr);
@@ -2771,7 +2805,7 @@ void hci_conn_params_clear_all(struct hci_dev *hdev)
        BT_DBG("All LE connection parameters were removed");
 }
 
-static void inquiry_complete(struct hci_dev *hdev, u8 status)
+static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        if (status) {
                BT_ERR("Failed to start inquiry: status %d", status);
@@ -2783,7 +2817,8 @@ static void inquiry_complete(struct hci_dev *hdev, u8 status)
        }
 }
 
-static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
+static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
+                                         u16 opcode)
 {
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
@@ -2796,6 +2831,8 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
                return;
        }
 
+       hdev->discovery.scan_start = 0;
+
        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                hci_dev_lock(hdev);
@@ -2835,6 +2872,8 @@ static void le_scan_disable_work(struct work_struct *work)
 
        BT_DBG("%s", hdev->name);
 
+       cancel_delayed_work_sync(&hdev->le_scan_restart);
+
        hci_req_init(&req, hdev);
 
        hci_req_add_le_scan_disable(&req);
@@ -2844,6 +2883,74 @@ static void le_scan_disable_work(struct work_struct *work)
                BT_ERR("Disable LE scanning request failed: err %d", err);
 }
 
+static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
+                                         u16 opcode)
+{
+       unsigned long timeout, duration, scan_start, now;
+
+       BT_DBG("%s", hdev->name);
+
+       if (status) {
+               BT_ERR("Failed to restart LE scan: status %d", status);
+               return;
+       }
+
+       if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
+           !hdev->discovery.scan_start)
+               return;
+
+       /* When the scan was started, hdev->le_scan_disable has been queued
+        * after duration from scan_start. During scan restart this job
+        * has been canceled, and we need to queue it again after proper
+        * timeout, to make sure that scan does not run indefinitely.
+        */
+       duration = hdev->discovery.scan_duration;
+       scan_start = hdev->discovery.scan_start;
+       now = jiffies;
+       if (now - scan_start <= duration) {
+               int elapsed;
+
+               if (now >= scan_start)
+                       elapsed = now - scan_start;
+               else
+                       elapsed = ULONG_MAX - scan_start + now;
+
+               timeout = duration - elapsed;
+       } else {
+               timeout = 0;
+       }
+       queue_delayed_work(hdev->workqueue,
+                          &hdev->le_scan_disable, timeout);
+}
+
+static void le_scan_restart_work(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                           le_scan_restart.work);
+       struct hci_request req;
+       struct hci_cp_le_set_scan_enable cp;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       /* If controller is not scanning we are done. */
+       if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+               return;
+
+       hci_req_init(&req, hdev);
+
+       hci_req_add_le_scan_disable(&req);
+
+       memset(&cp, 0, sizeof(cp));
+       cp.enable = LE_SCAN_ENABLE;
+       cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+       hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+
+       err = hci_req_run(&req, le_scan_restart_work_complete);
+       if (err)
+               BT_ERR("Restart LE scan request failed: err %d", err);
+}
+
 /* Copy the Identity Address of the controller.
  *
  * If the controller has a public BD_ADDR, then by default use that one.
@@ -2935,10 +3042,12 @@ struct hci_dev *hci_alloc_dev(void)
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);
+       INIT_WORK(&hdev->error_reset, hci_error_reset);
 
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+       INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
 
        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
@@ -3108,8 +3217,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
                rfkill_destroy(hdev->rfkill);
        }
 
-       smp_unregister(hdev);
-
        device_del(&hdev->dev);
 
        debugfs_remove_recursive(hdev->debugfs);
@@ -4176,7 +4283,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
 
 call_complete:
        if (req_complete)
-               req_complete(hdev, status);
+               req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
 }
 
 static void hci_rx_work(struct work_struct *work)
index ee33ce88d3d82caf82475150ec93b75203e237d9..65261e5d4b84bbbdc1da1d11083b9f32b6233d7a 100644 (file)
@@ -156,6 +156,35 @@ static const struct file_operations uuids_fops = {
        .release        = single_release,
 };
 
+static int remote_oob_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct oob_data *data;
+
+       hci_dev_lock(hdev);
+       list_for_each_entry(data, &hdev->remote_oob_data, list) {
+               seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n",
+                          &data->bdaddr, data->bdaddr_type, data->present,
+                          16, data->hash192, 16, data->rand192,
+                          16, data->hash256, 19, data->rand256);
+       }
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int remote_oob_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, remote_oob_show, inode->i_private);
+}
+
+static const struct file_operations remote_oob_fops = {
+       .open           = remote_oob_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int conn_info_min_age_set(void *data, u64 val)
 {
        struct hci_dev *hdev = data;
@@ -212,6 +241,42 @@ static int conn_info_max_age_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");
 
+static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations use_debug_keys_fops = {
+       .open           = simple_open,
+       .read           = use_debug_keys_read,
+       .llseek         = default_llseek,
+};
+
+static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations sc_only_mode_fops = {
+       .open           = simple_open,
+       .read           = sc_only_mode_read,
+       .llseek         = default_llseek,
+};
+
 void hci_debugfs_create_common(struct hci_dev *hdev)
 {
        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
@@ -220,16 +285,29 @@ void hci_debugfs_create_common(struct hci_dev *hdev)
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
+       debugfs_create_u8("hardware_error", 0444, hdev->debugfs,
+                         &hdev->hw_error_code);
+
        debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
                            &device_list_fops);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+       debugfs_create_file("remote_oob", 0400, hdev->debugfs, hdev,
+                           &remote_oob_fops);
 
        debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
                            &conn_info_min_age_fops);
        debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
                            &conn_info_max_age_fops);
+
+       if (lmp_ssp_capable(hdev) || lmp_le_capable(hdev))
+               debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
+                                   hdev, &use_debug_keys_fops);
+
+       if (lmp_sc_capable(hdev) || lmp_le_capable(hdev))
+               debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
+                                   hdev, &sc_only_mode_fops);
 }
 
 static int inquiry_cache_show(struct seq_file *f, void *p)
@@ -332,6 +410,24 @@ static int voice_setting_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
 
+static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = hdev->ssp_debug_mode ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations ssp_debug_mode_fops = {
+       .open           = simple_open,
+       .read           = ssp_debug_mode_read,
+       .llseek         = default_llseek,
+};
+
 static int auto_accept_delay_set(void *data, u64 val)
 {
        struct hci_dev *hdev = data;
@@ -357,114 +453,6 @@ static int auto_accept_delay_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");
 
-static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
-                                size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[3];
-
-       buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
-       buf[1] = '\n';
-       buf[2] = '\0';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static const struct file_operations sc_only_mode_fops = {
-       .open           = simple_open,
-       .read           = sc_only_mode_read,
-       .llseek         = default_llseek,
-};
-
-static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
-                                    size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[3];
-
-       buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
-       buf[1] = '\n';
-       buf[2] = '\0';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t force_sc_support_write(struct file *file,
-                                     const char __user *user_buf,
-                                     size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[32];
-       size_t buf_size = min(count, (sizeof(buf)-1));
-       bool enable;
-
-       if (test_bit(HCI_UP, &hdev->flags))
-               return -EBUSY;
-
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-
-       buf[buf_size] = '\0';
-       if (strtobool(buf, &enable))
-               return -EINVAL;
-
-       if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
-               return -EALREADY;
-
-       change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
-
-       return count;
-}
-
-static const struct file_operations force_sc_support_fops = {
-       .open           = simple_open,
-       .read           = force_sc_support_read,
-       .write          = force_sc_support_write,
-       .llseek         = default_llseek,
-};
-
-static ssize_t force_lesc_support_read(struct file *file,
-                                      char __user *user_buf,
-                                      size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[3];
-
-       buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
-       buf[1] = '\n';
-       buf[2] = '\0';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t force_lesc_support_write(struct file *file,
-                                       const char __user *user_buf,
-                                       size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[32];
-       size_t buf_size = min(count, (sizeof(buf)-1));
-       bool enable;
-
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-
-       buf[buf_size] = '\0';
-       if (strtobool(buf, &enable))
-               return -EINVAL;
-
-       if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
-               return -EALREADY;
-
-       change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
-
-       return count;
-}
-
-static const struct file_operations force_lesc_support_fops = {
-       .open           = simple_open,
-       .read           = force_lesc_support_read,
-       .write          = force_lesc_support_write,
-       .llseek         = default_llseek,
-};
-
 static int idle_timeout_set(void *data, u64 val)
 {
        struct hci_dev *hdev = data;
@@ -561,18 +549,10 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev)
                            &voice_setting_fops);
 
        if (lmp_ssp_capable(hdev)) {
+               debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
+                                   hdev, &ssp_debug_mode_fops);
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
-               debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
-                                   hdev, &sc_only_mode_fops);
-
-               debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
-                                   hdev, &force_sc_support_fops);
-
-               if (lmp_le_capable(hdev))
-                       debugfs_create_file("force_lesc_support", 0644,
-                                           hdev->debugfs, hdev,
-                                           &force_lesc_support_fops);
        }
 
        if (lmp_sniff_capable(hdev)) {
index 0881efd0ad2d8802ea32fd7cd6c882030fc7cafc..a3fb094822b621e5ef3b3205d1d5fce7c9d3f6b8 100644 (file)
@@ -36,6 +36,9 @@
 #include "amp.h"
 #include "smp.h"
 
+#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
+                "\x00\x00\x00\x00\x00\x00\x00\x00"
+
 /* Handle HCI Event packets */
 
 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -197,7 +200,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
        /* Reset all non-persistent flags */
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
 
-       hdev->discovery.state = DISCOVERY_STOPPED;
+       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 
@@ -214,6 +218,40 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
        hci_bdaddr_list_clear(&hdev->le_white_list);
 }
 
+static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
+                                       struct sk_buff *skb)
+{
+       struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
+       struct hci_cp_read_stored_link_key *sent;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
+       if (!sent)
+               return;
+
+       if (!rp->status && sent->read_all == 0x01) {
+               hdev->stored_max_keys = rp->max_keys;
+               hdev->stored_num_keys = rp->num_keys;
+       }
+}
+
+static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
+                                         struct sk_buff *skb)
+{
+       struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       if (rp->num_keys <= hdev->stored_num_keys)
+               hdev->stored_num_keys -= rp->num_keys;
+       else
+               hdev->stored_num_keys = 0;
+}
+
 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 {
        __u8 status = *((__u8 *) skb->data);
@@ -491,9 +529,7 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
                        hdev->features[1][0] &= ~LMP_HOST_SC;
        }
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
-               mgmt_sc_enable_complete(hdev, sent->support, status);
-       else if (!status) {
+       if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
                if (sent->support)
                        set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
                else
@@ -1453,6 +1489,21 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
+static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       u8 status = *((u8 *) skb->data);
+       u8 *mode;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (status)
+               return;
+
+       mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
+       if (mode)
+               hdev->ssp_debug_mode = *mode;
+}
+
 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -2635,7 +2686,8 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
        if (conn->state != BT_CONFIG)
                goto unlock;
 
-       if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
+       if (!ev->status && lmp_ext_feat_capable(hdev) &&
+           lmp_ext_feat_capable(conn)) {
                struct hci_cp_read_remote_ext_features cp;
                cp.handle = ev->handle;
                cp.page = 0x01;
@@ -2714,6 +2766,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_reset(hdev, skb);
                break;
 
+       case HCI_OP_READ_STORED_LINK_KEY:
+               hci_cc_read_stored_link_key(hdev, skb);
+               break;
+
+       case HCI_OP_DELETE_STORED_LINK_KEY:
+               hci_cc_delete_stored_link_key(hdev, skb);
+               break;
+
        case HCI_OP_WRITE_LOCAL_NAME:
                hci_cc_write_local_name(hdev, skb);
                break;
@@ -2938,6 +2998,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_read_tx_power(hdev, skb);
                break;
 
+       case HCI_OP_WRITE_SSP_DEBUG_MODE:
+               hci_cc_write_ssp_debug_mode(hdev, skb);
+               break;
+
        default:
                BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
                break;
@@ -3056,7 +3120,9 @@ static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_hardware_error *ev = (void *) skb->data;
 
-       BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
+       hdev->hw_error_code = ev->code;
+
+       queue_work(hdev->req_workqueue, &hdev->error_reset);
 }
 
 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3815,6 +3881,52 @@ static u8 hci_get_auth_req(struct hci_conn *conn)
        return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
 }
 
+static u8 bredr_oob_data_present(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+       struct oob_data *data;
+
+       data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
+       if (!data)
+               return 0x00;
+
+       if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) {
+               if (bredr_sc_enabled(hdev)) {
+                       /* When Secure Connections is enabled, then just
+                        * return the present value stored with the OOB
+                        * data. The stored value contains the right present
+                        * information. However it can only be trusted when
+                        * not in Secure Connection Only mode.
+                        */
+                       if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags))
+                               return data->present;
+
+                       /* When Secure Connections Only mode is enabled, then
+                        * the P-256 values are required. If they are not
+                        * available, then do not declare that OOB data is
+                        * present.
+                        */
+                       if (!memcmp(data->rand256, ZERO_KEY, 16) ||
+                           !memcmp(data->hash256, ZERO_KEY, 16))
+                               return 0x00;
+
+                       return 0x02;
+               }
+
+               /* When Secure Connections is not enabled or actually
+                * not supported by the hardware, then check that if
+                * P-192 data values are present.
+                */
+               if (!memcmp(data->rand192, ZERO_KEY, 16) ||
+                   !memcmp(data->hash192, ZERO_KEY, 16))
+                       return 0x00;
+
+               return 0x01;
+       }
+
+       return 0x00;
+}
+
 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -3866,12 +3978,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                        conn->auth_type &= HCI_AT_NO_BONDING_MITM;
 
                cp.authentication = conn->auth_type;
-
-               if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
-                   (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
-                       cp.oob_data = 0x01;
-               else
-                       cp.oob_data = 0x00;
+               cp.oob_data = bredr_oob_data_present(conn);
 
                hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
                             sizeof(cp), &cp);
@@ -4123,33 +4230,39 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
                goto unlock;
 
        data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
-       if (data) {
-               if (bredr_sc_enabled(hdev)) {
-                       struct hci_cp_remote_oob_ext_data_reply cp;
-
-                       bacpy(&cp.bdaddr, &ev->bdaddr);
-                       memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
-                       memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
-                       memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
-                       memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
+       if (!data) {
+               struct hci_cp_remote_oob_data_neg_reply cp;
 
-                       hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
-                                    sizeof(cp), &cp);
-               } else {
-                       struct hci_cp_remote_oob_data_reply cp;
+               bacpy(&cp.bdaddr, &ev->bdaddr);
+               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
+                            sizeof(cp), &cp);
+               goto unlock;
+       }
 
-                       bacpy(&cp.bdaddr, &ev->bdaddr);
-                       memcpy(cp.hash, data->hash192, sizeof(cp.hash));
-                       memcpy(cp.rand, data->rand192, sizeof(cp.rand));
+       if (bredr_sc_enabled(hdev)) {
+               struct hci_cp_remote_oob_ext_data_reply cp;
 
-                       hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
-                                    sizeof(cp), &cp);
+               bacpy(&cp.bdaddr, &ev->bdaddr);
+               if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
+                       memset(cp.hash192, 0, sizeof(cp.hash192));
+                       memset(cp.rand192, 0, sizeof(cp.rand192));
+               } else {
+                       memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
+                       memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
                }
+               memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
+               memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
+
+               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
+                            sizeof(cp), &cp);
        } else {
-               struct hci_cp_remote_oob_data_neg_reply cp;
+               struct hci_cp_remote_oob_data_reply cp;
 
                bacpy(&cp.bdaddr, &ev->bdaddr);
-               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
+               memcpy(cp.hash, data->hash192, sizeof(cp.hash));
+               memcpy(cp.rand, data->rand192, sizeof(cp.rand));
+
+               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
                             sizeof(cp), &cp);
        }
 
index 324c6418b17c7156c8d6cbf44ea78db6939ae431..b59f92c6df0cf7cfa1b4c770cb2e880079ae0c84 100644 (file)
@@ -533,7 +533,8 @@ void __hci_update_background_scan(struct hci_request *req)
        }
 }
 
-static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
+static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
+                                           u16 opcode)
 {
        if (status)
                BT_DBG("HCI request failed to update background scanning: "
index 2c245fdf319a60022328e8f3918251db526f88f8..1d65c5be7c823a282bc74a4d9691db53eb37b8d3 100644 (file)
@@ -216,11 +216,39 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
        read_unlock(&hci_sk_list.lock);
 }
 
+static void queue_monitor_skb(struct sk_buff *skb)
+{
+       struct sock *sk;
+
+       BT_DBG("len %d", skb->len);
+
+       read_lock(&hci_sk_list.lock);
+
+       sk_for_each(sk, &hci_sk_list.head) {
+               struct sk_buff *nskb;
+
+               if (sk->sk_state != BT_BOUND)
+                       continue;
+
+               if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+                       continue;
+
+               nskb = skb_clone(skb, GFP_ATOMIC);
+               if (!nskb)
+                       continue;
+
+               if (sock_queue_rcv_skb(sk, nskb))
+                       kfree_skb(nskb);
+       }
+
+       read_unlock(&hci_sk_list.lock);
+}
+
 /* Send frame to monitor socket */
 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct sock *sk;
        struct sk_buff *skb_copy = NULL;
+       struct hci_mon_hdr *hdr;
        __le16 opcode;
 
        if (!atomic_read(&monitor_promisc))
@@ -251,74 +279,21 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
                return;
        }
 
-       read_lock(&hci_sk_list.lock);
-
-       sk_for_each(sk, &hci_sk_list.head) {
-               struct sk_buff *nskb;
-
-               if (sk->sk_state != BT_BOUND)
-                       continue;
-
-               if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
-                       continue;
-
-               if (!skb_copy) {
-                       struct hci_mon_hdr *hdr;
-
-                       /* Create a private copy with headroom */
-                       skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
-                                                     GFP_ATOMIC, true);
-                       if (!skb_copy)
-                               continue;
-
-                       /* Put header before the data */
-                       hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
-                       hdr->opcode = opcode;
-                       hdr->index = cpu_to_le16(hdev->id);
-                       hdr->len = cpu_to_le16(skb->len);
-               }
-
-               nskb = skb_clone(skb_copy, GFP_ATOMIC);
-               if (!nskb)
-                       continue;
-
-               if (sock_queue_rcv_skb(sk, nskb))
-                       kfree_skb(nskb);
-       }
+       /* Create a private copy with headroom */
+       skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
+       if (!skb_copy)
+               return;
 
-       read_unlock(&hci_sk_list.lock);
+       /* Put header before the data */
+       hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
+       hdr->opcode = opcode;
+       hdr->index = cpu_to_le16(hdev->id);
+       hdr->len = cpu_to_le16(skb->len);
 
+       queue_monitor_skb(skb_copy);
        kfree_skb(skb_copy);
 }
 
-static void send_monitor_event(struct sk_buff *skb)
-{
-       struct sock *sk;
-
-       BT_DBG("len %d", skb->len);
-
-       read_lock(&hci_sk_list.lock);
-
-       sk_for_each(sk, &hci_sk_list.head) {
-               struct sk_buff *nskb;
-
-               if (sk->sk_state != BT_BOUND)
-                       continue;
-
-               if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
-                       continue;
-
-               nskb = skb_clone(skb, GFP_ATOMIC);
-               if (!nskb)
-                       continue;
-
-               if (sock_queue_rcv_skb(sk, nskb))
-                       kfree_skb(nskb);
-       }
-
-       read_unlock(&hci_sk_list.lock);
-}
-
 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
 {
        struct hci_mon_hdr *hdr;
@@ -422,7 +397,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 
                skb = create_monitor_event(hdev, event);
                if (skb) {
-                       send_monitor_event(skb);
+                       queue_monitor_skb(skb);
                        kfree_skb(skb);
                }
        }
@@ -1230,6 +1205,8 @@ int __init hci_sock_init(void)
 {
        int err;
 
+       BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
+
        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;
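The same BUILD_BUG_ON() guard is added to the L2CAP, RFCOMM and SCO socket init paths below. It makes the size assumption behind casting the bluetooth sockaddr variants to struct sockaddr explicit; a standalone sketch of the idiom (struct example_addr is hypothetical):

    #include <linux/bug.h>
    #include <linux/socket.h>

    struct example_addr {
            __kernel_sa_family_t family;
            unsigned char        data[14];  /* same budget as struct sockaddr */
    };

    static inline void example_sockaddr_check(void)
    {
            /* fails the build, not the runtime, if the struct ever grows */
            BUILD_BUG_ON(sizeof(struct example_addr) > sizeof(struct sockaddr));
    }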
index d04dc009573691cde515d1db82c13cbfd11a459f..6ba33f9631e8e5830374ab4e51720c493969c67c 100644 (file)
@@ -63,10 +63,10 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
                     struct sk_buff_head *skbs, u8 event);
 
-static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
+static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
 {
-       if (hcon->type == LE_LINK) {
-               if (type == ADDR_LE_DEV_PUBLIC)
+       if (link_type == LE_LINK) {
+               if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
                        return BDADDR_LE_PUBLIC;
                else
                        return BDADDR_LE_RANDOM;
@@ -75,6 +75,16 @@ static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
        return BDADDR_BREDR;
 }
 
+static inline u8 bdaddr_src_type(struct hci_conn *hcon)
+{
+       return bdaddr_type(hcon->type, hcon->src_type);
+}
+
+static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
+{
+       return bdaddr_type(hcon->type, hcon->dst_type);
+}
+
 /* ---- L2CAP channels ---- */
 
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
@@ -646,7 +656,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work)
        list_for_each_entry(chan, &conn->chan_l, list) {
                l2cap_chan_lock(chan);
                bacpy(&chan->dst, &hcon->dst);
-               chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
+               chan->dst_type = bdaddr_dst_type(hcon);
                l2cap_chan_unlock(chan);
        }
 
@@ -3790,8 +3800,8 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
 
        bacpy(&chan->src, &conn->hcon->src);
        bacpy(&chan->dst, &conn->hcon->dst);
-       chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
-       chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
+       chan->src_type = bdaddr_src_type(conn->hcon);
+       chan->dst_type = bdaddr_dst_type(conn->hcon);
        chan->psm  = psm;
        chan->dcid = scid;
        chan->local_amp_id = amp_id;
@@ -5441,8 +5451,8 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
 
        bacpy(&chan->src, &conn->hcon->src);
        bacpy(&chan->dst, &conn->hcon->dst);
-       chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
-       chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
+       chan->src_type = bdaddr_src_type(conn->hcon);
+       chan->dst_type = bdaddr_dst_type(conn->hcon);
        chan->psm  = psm;
        chan->dcid = scid;
        chan->omtu = mtu;
@@ -6881,7 +6891,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
         */
        if (hcon->type == LE_LINK &&
            hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
-                                  bdaddr_type(hcon, hcon->dst_type))) {
+                                  bdaddr_dst_type(hcon))) {
                kfree_skb(skb);
                return;
        }
@@ -6968,7 +6978,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
 
        if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
            (bredr_sc_enabled(hcon->hdev) ||
-            test_bit(HCI_FORCE_LESC, &hcon->hdev->dbg_flags)))
+            test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags)))
                conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
 
        mutex_init(&conn->ident_lock);
@@ -7123,7 +7133,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
 
        /* Update source addr of the socket */
        bacpy(&chan->src, &hcon->src);
-       chan->src_type = bdaddr_type(hcon, hcon->src_type);
+       chan->src_type = bdaddr_src_type(hcon);
 
        __l2cap_chan_add(conn, chan);
 
@@ -7197,8 +7207,10 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
  * global list (by passing NULL as first parameter).
  */
 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
-                                                 bdaddr_t *src, u8 link_type)
+                                                 struct hci_conn *hcon)
 {
+       u8 src_type = bdaddr_src_type(hcon);
+
        read_lock(&chan_list_lock);
 
        if (c)
@@ -7211,11 +7223,9 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
                        continue;
                if (c->state != BT_LISTEN)
                        continue;
-               if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
+               if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
                        continue;
-               if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
-                       continue;
-               if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
+               if (src_type != c->src_type)
                        continue;
 
                l2cap_chan_hold(c);
@@ -7246,7 +7256,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
        if (!conn)
                return;
 
-       dst_type = bdaddr_type(hcon, hcon->dst_type);
+       dst_type = bdaddr_dst_type(hcon);
 
        /* If device is blocked, do not create channels for it */
        if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
@@ -7257,7 +7267,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
         * we left off, because the list lock would prevent calling the
         * potentially sleeping l2cap_chan_lock() function.
         */
-       pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
+       pchan = l2cap_global_fixed_chan(NULL, hcon);
        while (pchan) {
                struct l2cap_chan *chan, *next;
 
@@ -7270,7 +7280,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
                if (chan) {
                        bacpy(&chan->src, &hcon->src);
                        bacpy(&chan->dst, &hcon->dst);
-                       chan->src_type = bdaddr_type(hcon, hcon->src_type);
+                       chan->src_type = bdaddr_src_type(hcon);
                        chan->dst_type = dst_type;
 
                        __l2cap_chan_add(conn, chan);
@@ -7278,8 +7288,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
 
                l2cap_chan_unlock(pchan);
 next:
-               next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
-                                              hcon->type);
+               next = l2cap_global_fixed_chan(pchan, hcon);
                l2cap_chan_put(pchan);
                pchan = next;
        }
@@ -7527,8 +7536,8 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
        read_lock(&chan_list_lock);
 
        list_for_each_entry(c, &chan_list, global_l) {
-               seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
-                          &c->src, &c->dst,
+               seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
+                          &c->src, c->src_type, &c->dst, c->dst_type,
                           c->state, __le16_to_cpu(c->psm),
                           c->scid, c->dcid, c->imtu, c->omtu,
                           c->sec_level, c->mode);
index f65caf41953f866fdea55b8eb342fafc9dc8a1c7..60694f0f4c73768dee1db1a4926ced43522dbe83 100644 (file)
@@ -302,7 +302,7 @@ done:
 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
                             int flags)
 {
-       DECLARE_WAITQUEUE(wait, current);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;
@@ -316,8 +316,6 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
        /* Wait for an incoming connection. (wake-one). */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (1) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
                if (sk->sk_state != BT_LISTEN) {
                        err = -EBADFD;
                        break;
@@ -338,10 +336,11 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
                }
 
                release_sock(sk);
-               timeo = schedule_timeout(timeo);
+
+               timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+
                lock_sock_nested(sk, L2CAP_NESTING_PARENT);
        }
-       __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
 
        if (err)
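The accept loops here and in the RFCOMM and SCO sockets below all move from the open-coded set_current_state()/schedule_timeout() sequence to wait_woken(): woken_wake_function() marks the wait entry with a flag that wait_woken() tests before sleeping, so a wakeup racing with the condition check is not lost. A condensed sketch of the pattern (example_ready() is hypothetical; signal handling elided):

    static int example_accept_wait(struct sock *sk, long timeo)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            int err = 0;

            add_wait_queue_exclusive(sk_sleep(sk), &wait);
            while (!example_ready(sk)) {
                    if (!timeo) {
                            err = -EAGAIN;
                            break;
                    }
                    release_sock(sk);
                    timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
                    lock_sock(sk);
            }
            remove_wait_queue(sk_sleep(sk), &wait);

            return err;
    }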
@@ -1614,6 +1613,8 @@ int __init l2cap_init_sockets(void)
 {
        int err;
 
+       BUILD_BUG_ON(sizeof(struct sockaddr_l2) > sizeof(struct sockaddr));
+
        err = proto_register(&l2cap_proto, 0);
        if (err < 0)
                return err;
index 3d2f7ad1e65534341ce76608bbf9602b9307c987..9ec5390c85eba61c3c3bcb5a813c8d8af326cf33 100644 (file)
@@ -131,6 +131,9 @@ static const u16 mgmt_events[] = {
 
 #define CACHE_TIMEOUT  msecs_to_jiffies(2 * 1000)
 
+#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
+                "\x00\x00\x00\x00\x00\x00\x00\x00"
+
 struct pending_cmd {
        struct list_head list;
        u16 opcode;
@@ -570,8 +573,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
                        settings |= MGMT_SETTING_HS;
                }
 
-               if (lmp_sc_capable(hdev) ||
-                   test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
+               if (lmp_sc_capable(hdev))
                        settings |= MGMT_SETTING_SECURE_CONN;
        }
 
@@ -1252,7 +1254,7 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
                            sizeof(settings));
 }
 
-static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
+static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        BT_DBG("%s status 0x%02x", hdev->name, status);
 
@@ -1519,7 +1521,8 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
                return MGMT_STATUS_SUCCESS;
 }
 
-static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
+static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
+                                     u16 opcode)
 {
        struct pending_cmd *cmd;
        struct mgmt_mode *cp;
@@ -1778,7 +1781,8 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
 }
 
-static void set_connectable_complete(struct hci_dev *hdev, u8 status)
+static void set_connectable_complete(struct hci_dev *hdev, u8 status,
+                                    u16 opcode)
 {
        struct pending_cmd *cmd;
        struct mgmt_mode *cp;
@@ -2196,7 +2200,7 @@ unlock:
        return err;
 }
 
-static void le_enable_complete(struct hci_dev *hdev, u8 status)
+static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct cmd_lookup match = { NULL, hdev };
 
@@ -2386,7 +2390,7 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
-static void add_uuid_complete(struct hci_dev *hdev, u8 status)
+static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        BT_DBG("status 0x%02x", status);
 
@@ -2465,7 +2469,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
        return false;
 }
 
-static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
+static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        BT_DBG("status 0x%02x", status);
 
@@ -2550,7 +2554,7 @@ unlock:
        return err;
 }
 
-static void set_class_complete(struct hci_dev *hdev, u8 status)
+static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        BT_DBG("status 0x%02x", status);
 
@@ -3484,7 +3488,7 @@ static void update_name(struct hci_request *req)
        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 }
 
-static void set_name_complete(struct hci_dev *hdev, u8 status)
+static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct mgmt_cp_set_local_name *cp;
        struct pending_cmd *cmd;
@@ -3632,10 +3636,16 @@ unlock:
 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                               void *data, u16 len)
 {
+       struct mgmt_addr_info *addr = data;
        int err;
 
        BT_DBG("%s ", hdev->name);
 
+       if (!bdaddr_type_is_valid(addr->type))
+               return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                   MGMT_STATUS_INVALID_PARAMS, addr,
+                                   sizeof(*addr));
+
        hci_dev_lock(hdev);
 
        if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
@@ -3662,28 +3672,53 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                                   status, &cp->addr, sizeof(cp->addr));
        } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
                struct mgmt_cp_add_remote_oob_ext_data *cp = data;
-               u8 *rand192, *hash192;
+               u8 *rand192, *hash192, *rand256, *hash256;
                u8 status;
 
-               if (cp->addr.type != BDADDR_BREDR) {
-                       err = cmd_complete(sk, hdev->id,
-                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                          MGMT_STATUS_INVALID_PARAMS,
-                                          &cp->addr, sizeof(cp->addr));
-                       goto unlock;
-               }
-
                if (bdaddr_type_is_le(cp->addr.type)) {
+                       /* Enforce zero-valued 192-bit parameters as
+                        * long as legacy SMP OOB isn't implemented.
+                        */
+                       if (memcmp(cp->rand192, ZERO_KEY, 16) ||
+                           memcmp(cp->hash192, ZERO_KEY, 16)) {
+                               err = cmd_complete(sk, hdev->id,
+                                                  MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                                  MGMT_STATUS_INVALID_PARAMS,
+                                                  addr, sizeof(*addr));
+                               goto unlock;
+                       }
+
                        rand192 = NULL;
                        hash192 = NULL;
                } else {
-                       rand192 = cp->rand192;
-                       hash192 = cp->hash192;
+                       /* If one of the P-192 values is set to zero,
+                        * just disable OOB data for P-192.
+                        */
+                       if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
+                           !memcmp(cp->hash192, ZERO_KEY, 16)) {
+                               rand192 = NULL;
+                               hash192 = NULL;
+                       } else {
+                               rand192 = cp->rand192;
+                               hash192 = cp->hash192;
+                       }
+               }
+
+               /* If one of the P-256 values is set to zero, just
+                * disable OOB data for P-256.
+                */
+               if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
+                   !memcmp(cp->hash256, ZERO_KEY, 16)) {
+                       rand256 = NULL;
+                       hash256 = NULL;
+               } else {
+                       rand256 = cp->rand256;
+                       hash256 = cp->hash256;
                }
 
                err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
                                              cp->addr.type, hash192, rand192,
-                                             cp->hash256, cp->rand256);
+                                             hash256, rand256);
                if (err < 0)
                        status = MGMT_STATUS_FAILED;
                else
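The memcmp() checks against ZERO_KEY all encode the same rule: a hash/rand pair only counts as provided when neither half is all zeroes. As a hypothetical helper (not in the patch):

    static bool oob_pair_present(const u8 hash[16], const u8 rand[16])
    {
            /* usable only if neither value is the all-zero key */
            return memcmp(hash, ZERO_KEY, 16) && memcmp(rand, ZERO_KEY, 16);
    }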
@@ -3835,7 +3870,8 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
        return true;
 }
 
-static void start_discovery_complete(struct hci_dev *hdev, u8 status)
+static void start_discovery_complete(struct hci_dev *hdev, u8 status,
+                                    u16 opcode)
 {
        struct pending_cmd *cmd;
        unsigned long timeout;
@@ -3860,6 +3896,9 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
 
        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
 
+       /* If the scan involves an LE scan, pick a proper timeout for
+        * scheduling hdev->le_scan_disable, which will stop it.
+        */
        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
@@ -3876,9 +3915,23 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
                break;
        }
 
-       if (timeout)
+       if (timeout) {
+               /* When service discovery is used and the controller has
+                * a strict duplicate filter, it is important to remember
+                * the start and duration of the scan. This is required
+                * for restarting scanning during the discovery phase.
+                */
+               if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+                            &hdev->quirks) &&
+                   (hdev->discovery.uuid_count > 0 ||
+                    hdev->discovery.rssi != HCI_RSSI_INVALID)) {
+                       hdev->discovery.scan_start = jiffies;
+                       hdev->discovery.scan_duration = timeout;
+               }
+
                queue_delayed_work(hdev->workqueue,
                                   &hdev->le_scan_disable, timeout);
+       }
 
 unlock:
        hci_dev_unlock(hdev);
@@ -4064,7 +4117,7 @@ failed:
        return err;
 }
 
-static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
+static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct pending_cmd *cmd;
 
@@ -4290,7 +4343,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
        return err;
 }
 
-static void set_advertising_complete(struct hci_dev *hdev, u8 status)
+static void set_advertising_complete(struct hci_dev *hdev, u8 status,
+                                    u16 opcode)
 {
        struct cmd_lookup match = { NULL, hdev };
 
@@ -4497,7 +4551,8 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
        return err;
 }
 
-static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
+static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
+                                     u16 opcode)
 {
        struct pending_cmd *cmd;
 
@@ -4595,7 +4650,7 @@ unlock:
        return err;
 }
 
-static void set_bredr_complete(struct hci_dev *hdev, u8 status)
+static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct pending_cmd *cmd;
 
@@ -4679,6 +4734,28 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                                 MGMT_STATUS_REJECTED);
                goto unlock;
+       } else {
+               /* When configuring a dual-mode controller to operate
+                * with LE only and using a static address, then switching
+                * BR/EDR back on is not allowed.
+                *
+                * Dual-mode controllers shall operate with the public
+                * address as their identity address for BR/EDR and LE. So
+                * reject the attempt to create an invalid configuration.
+                *
+                * The same restriction applies when secure connections
+                * have been enabled. For BR/EDR this is a controller feature
+                * while for LE it is a host stack feature. This means that
+                * switching BR/EDR back on when secure connections have been
+                * enabled is not a supported transaction.
+                */
+               if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+                   (bacmp(&hdev->static_addr, BDADDR_ANY) ||
+                    test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
+                       err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                        MGMT_STATUS_REJECTED);
+                       goto unlock;
+               }
        }
 
        if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
@@ -4717,30 +4794,80 @@ unlock:
        return err;
 }
 
+static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+       struct pending_cmd *cmd;
+       struct mgmt_mode *cp;
+
+       BT_DBG("%s status %u", hdev->name, status);
+
+       hci_dev_lock(hdev);
+
+       cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
+       if (!cmd)
+               goto unlock;
+
+       if (status) {
+               cmd_status(cmd->sk, cmd->index, cmd->opcode,
+                          mgmt_status(status));
+               goto remove;
+       }
+
+       cp = cmd->param;
+
+       switch (cp->val) {
+       case 0x00:
+               clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               break;
+       case 0x01:
+               set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               break;
+       case 0x02:
+               set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+               set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               break;
+       }
+
+       send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
+       new_settings(hdev, cmd->sk);
+
+remove:
+       mgmt_pending_remove(cmd);
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
        struct pending_cmd *cmd;
+       struct hci_request req;
        u8 val;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
-       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
-           !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
+       if (!lmp_sc_capable(hdev) &&
+           !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
+       if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+           lmp_sc_capable(hdev) &&
+           !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+                                 MGMT_STATUS_REJECTED);
+
        if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                  MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
-       if (!hdev_is_powered(hdev) ||
-           (!lmp_sc_capable(hdev) &&
-            !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
+       if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
            !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                bool changed;
 
@@ -4787,17 +4914,14 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                goto failed;
        }
 
-       err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
+       err = hci_req_run(&req, sc_enable_complete);
        if (err < 0) {
                mgmt_pending_remove(cmd);
                goto failed;
        }
 
-       if (cp->val == 0x02)
-               set_bit(HCI_SC_ONLY, &hdev->dev_flags);
-       else
-               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
-
 failed:
        hci_dev_unlock(hdev);
        return err;
@@ -5122,7 +5246,8 @@ static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
        return err;
 }
 
-static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
+static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
+                                      u16 opcode)
 {
        struct hci_cp_read_rssi *cp;
        struct pending_cmd *cmd;
@@ -5329,7 +5454,7 @@ complete:
        return err;
 }
 
-static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
+static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct hci_cp_read_clock *hci_cp;
        struct pending_cmd *cmd;
@@ -5507,7 +5632,7 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
        mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
 }
 
-static void add_device_complete(struct hci_dev *hdev, u8 status)
+static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct pending_cmd *cmd;
 
@@ -5630,7 +5755,7 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
        mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
 }
 
-static void remove_device_complete(struct hci_dev *hdev, u8 status)
+static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct pending_cmd *cmd;
 
@@ -6208,12 +6333,21 @@ static void restart_le_actions(struct hci_request *req)
        __hci_update_background_scan(req);
 }
 
-static void powered_complete(struct hci_dev *hdev, u8 status)
+static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct cmd_lookup match = { NULL, hdev };
 
        BT_DBG("status 0x%02x", status);
 
+       if (!status) {
+               /* Register the available SMP channels (BR/EDR and LE) only
+                * when successfully powering on the controller. This late
+                * registration is required so that LE SMP can clearly
+                * decide if the public address or static address is used.
+                */
+               smp_register(hdev);
+       }
+
        hci_dev_lock(hdev);
 
        mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
@@ -6235,14 +6369,16 @@ static int powered_update_hci(struct hci_dev *hdev)
 
        if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
            !lmp_host_ssp_capable(hdev)) {
-               u8 ssp = 1;
+               u8 mode = 0x01;
 
-               hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
-       }
+               hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
 
-       if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
-               u8 sc = 0x01;
-               hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
+               if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
+                       u8 support = 0x01;
+
+                       hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
+                                   sizeof(support), &support);
+               }
        }
 
        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
@@ -6962,43 +7098,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
        hci_req_run(&req, NULL);
 }
 
-void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
-{
-       struct cmd_lookup match = { NULL, hdev };
-       bool changed = false;
-
-       if (status) {
-               u8 mgmt_err = mgmt_status(status);
-
-               if (enable) {
-                       if (test_and_clear_bit(HCI_SC_ENABLED,
-                                              &hdev->dev_flags))
-                               new_settings(hdev, NULL);
-                       clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
-               }
-
-               mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
-                                    cmd_status_rsp, &mgmt_err);
-               return;
-       }
-
-       if (enable) {
-               changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
-       } else {
-               changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
-               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
-       }
-
-       mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
-                            settings_rsp, &match);
-
-       if (changed)
-               new_settings(hdev, match.sk);
-
-       if (match.sk)
-               sock_put(match.sk);
-}
-
 static void sk_lookup(struct pending_cmd *cmd, void *data)
 {
        struct cmd_lookup *match = data;
@@ -7069,28 +7168,21 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
                cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
                           mgmt_status(status));
        } else {
-               if (bredr_sc_enabled(hdev) && hash256 && rand256) {
-                       struct mgmt_rp_read_local_oob_ext_data rp;
+               struct mgmt_rp_read_local_oob_data rp;
+               size_t rp_size = sizeof(rp);
 
-                       memcpy(rp.hash192, hash192, sizeof(rp.hash192));
-                       memcpy(rp.rand192, rand192, sizeof(rp.rand192));
+               memcpy(rp.hash192, hash192, sizeof(rp.hash192));
+               memcpy(rp.rand192, rand192, sizeof(rp.rand192));
 
+               if (bredr_sc_enabled(hdev) && hash256 && rand256) {
                        memcpy(rp.hash256, hash256, sizeof(rp.hash256));
                        memcpy(rp.rand256, rand256, sizeof(rp.rand256));
-
-                       cmd_complete(cmd->sk, hdev->id,
-                                    MGMT_OP_READ_LOCAL_OOB_DATA, 0,
-                                    &rp, sizeof(rp));
                } else {
-                       struct mgmt_rp_read_local_oob_data rp;
-
-                       memcpy(rp.hash, hash192, sizeof(rp.hash));
-                       memcpy(rp.rand, rand192, sizeof(rp.rand));
-
-                       cmd_complete(cmd->sk, hdev->id,
-                                    MGMT_OP_READ_LOCAL_OOB_DATA, 0,
-                                    &rp, sizeof(rp));
+                       rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
                }
+
+               cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
+                            &rp, rp_size);
        }
 
        mgmt_pending_remove(cmd);
@@ -7163,6 +7255,21 @@ static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
        return false;
 }
 
+static void restart_le_scan(struct hci_dev *hdev)
+{
+       /* If the controller is not scanning, we are done. */
+       if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+               return;
+
+       if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
+                      hdev->discovery.scan_start +
+                      hdev->discovery.scan_duration))
+               return;
+
+       queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
+                          DISCOV_LE_RESTART_DELAY);
+}
+
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
                       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
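restart_le_scan() only queues the delayed restart while it can still fire inside the scan window recorded by start_discovery_complete(). The timing condition, restated as a standalone sketch (names local to the sketch):

    #include <linux/jiffies.h>

    static bool restart_fits_in_window(unsigned long scan_start,
                                       unsigned long scan_duration,
                                       unsigned long restart_delay)
    {
            /* true while the delayed work would still run before the
             * end of the recorded scan window
             */
            return !time_after(jiffies + restart_delay,
                               scan_start + scan_duration);
    }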
@@ -7185,14 +7292,18 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 
        /* When using service discovery with a RSSI threshold, then check
         * if such a RSSI threshold is specified. If a RSSI threshold has
-        * been specified, then all results with a RSSI smaller than the
-        * RSSI threshold will be dropped.
+        * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
+        * then all results with an RSSI smaller than the RSSI threshold will
+        * be dropped. If the quirk is set, let it through for further
+        * processing, as we might need to restart the scan.
         *
         * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
         * the results are also dropped.
         */
        if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
-           (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
+           (rssi == HCI_RSSI_INVALID ||
+           (rssi < hdev->discovery.rssi &&
+            !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
                return;
 
        /* Make sure that the buffer is big enough. The 5 extra bytes
@@ -7211,7 +7322,8 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
         * However when using service discovery, the value 127 will be
         * returned when the RSSI is not available.
         */
-       if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
+       if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
+           link_type == ACL_LINK)
                rssi = 0;
 
        bacpy(&ev->addr.bdaddr, bdaddr);
@@ -7226,12 +7338,20 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                 * kept and checking possible scan response data
                 * will be skipped.
                 */
-               if (hdev->discovery.uuid_count > 0)
+               if (hdev->discovery.uuid_count > 0) {
                        match = eir_has_uuids(eir, eir_len,
                                              hdev->discovery.uuid_count,
                                              hdev->discovery.uuids);
-               else
+                       /* If duplicate filtering does not report RSSI changes,
+                        * then restart scanning to ensure an updated result
+                        * with current RSSI values.
+                        */
+                       if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+                                             &hdev->quirks))
+                               restart_le_scan(hdev);
+               } else {
                        match = true;
+               }
 
                if (!match && !scan_rsp_len)
                        return;
@@ -7264,6 +7384,14 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                                                     hdev->discovery.uuid_count,
                                                     hdev->discovery.uuids))
                                return;
+
+                       /* If duplicate filtering does not report RSSI changes,
+                        * then restart scanning to ensure an updated result
+                        * with current RSSI values.
+                        */
+                       if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+                                    &hdev->quirks))
+                               restart_le_scan(hdev);
                }
 
                /* Append scan response data to event */
@@ -7277,6 +7405,14 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                        return;
        }
 
+       /* Validate the reported RSSI value against the RSSI threshold once
+        * more in case HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of
+        * LE scanning.
+        */
+       if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
+           rssi < hdev->discovery.rssi)
+               return;
+
        ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
        ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
 
@@ -7319,7 +7455,7 @@ void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
        mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
 }
 
-static void adv_enable_complete(struct hci_dev *hdev, u8 status)
+static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        BT_DBG("%s status %u", hdev->name, status);
 }
index 2348176401a0b19ad3b5e9129999381f43da50a0..3c6d2c8ac1a47bc7f5a96b576ca93e9542ee4fe3 100644 (file)
@@ -468,7 +468,7 @@ done:
 
 static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
 {
-       DECLARE_WAITQUEUE(wait, current);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;
@@ -487,8 +487,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
        /* Wait for an incoming connection. (wake-one). */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (1) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
                if (sk->sk_state != BT_LISTEN) {
                        err = -EBADFD;
                        break;
@@ -509,10 +507,11 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
                }
 
                release_sock(sk);
-               timeo = schedule_timeout(timeo);
+
+               timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        }
-       __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
 
        if (err)
@@ -1058,6 +1057,8 @@ int __init rfcomm_init_sockets(void)
 {
        int err;
 
+       BUILD_BUG_ON(sizeof(struct sockaddr_rc) > sizeof(struct sockaddr));
+
        err = proto_register(&rfcomm_proto, 0);
        if (err < 0)
                return err;
index 30e5ea3f1ad311388bf7f95aec8f70cb88e8fab9..76321b546e8426146dba242c281c2915464e7d6f 100644 (file)
@@ -618,7 +618,7 @@ done:
 
 static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
 {
-       DECLARE_WAITQUEUE(wait, current);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk, *ch;
        long timeo;
        int err = 0;
@@ -632,8 +632,6 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
        /* Wait for an incoming connection. (wake-one). */
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (1) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
                if (sk->sk_state != BT_LISTEN) {
                        err = -EBADFD;
                        break;
@@ -654,10 +652,10 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
                }
 
                release_sock(sk);
-               timeo = schedule_timeout(timeo);
+
+               timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
                lock_sock(sk);
        }
-       __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
 
        if (err)
@@ -1184,6 +1182,8 @@ int __init sco_init(void)
 {
        int err;
 
+       BUILD_BUG_ON(sizeof(struct sockaddr_sco) > sizeof(struct sockaddr));
+
        err = proto_register(&sco_proto, 0);
        if (err < 0)
                return err;
index 9c67315172cf2185457d355df5bb527f85b98d06..378f4064952cfd0fe954511e76d307a486118ca4 100644 (file)
@@ -184,7 +184,7 @@ static int __init test_ecdh(void)
        delta = ktime_sub(rettime, calltime);
        duration = (unsigned long long) ktime_to_ns(delta) >> 10;
 
-       BT_INFO("ECDH test passed in %lld usecs", duration);
+       BT_INFO("ECDH test passed in %llu usecs", duration);
 
        return 0;
 }
index 358264c0e78541e15f7ae2339b116a6161eb35a0..c09a821f381d0b648b45ca5c722d89161775f638 100644 (file)
@@ -20,6 +20,7 @@
    SOFTWARE IS DISCLAIMED.
 */
 
+#include <linux/debugfs.h>
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <crypto/b128ops.h>
@@ -299,7 +300,7 @@ static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
        if (err)
                return err;
 
-       BT_DBG("res %16phN", res);
+       SMP_DBG("res %16phN", res);
 
        return err;
 }
@@ -619,7 +620,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
 
                oob_data = hci_find_remote_oob_data(hdev, &hcon->dst,
                                                    bdaddr_type);
-               if (oob_data) {
+               if (oob_data && oob_data->present) {
                        set_bit(SMP_FLAG_OOB, &smp->flags);
                        oob_flag = SMP_OOB_PRESENT;
                        memcpy(smp->rr, oob_data->rand256, 16);
@@ -1675,7 +1676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
        if (conn->hcon->type == ACL_LINK) {
                /* We must have a BR/EDR SC link */
                if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
-                   !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+                   !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
                        return SMP_CROSS_TRANSP_NOT_ALLOWED;
 
                set_bit(SMP_FLAG_SC, &smp->flags);
@@ -2304,8 +2305,12 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
         * implementations are not known of, and in order not to over-
         * complicate our implementation, simply pretend that we never
         * received an IRK for such a device.
+        *
+        * The Identity Address must also be a Static Random or Public
+        * Address, which hci_is_identity_address() checks for.
         */
-       if (!bacmp(&info->bdaddr, BDADDR_ANY)) {
+       if (!bacmp(&info->bdaddr, BDADDR_ANY) ||
+           !hci_is_identity_address(&info->bdaddr, info->addr_type)) {
                BT_ERR("Ignoring IRK with no identity address");
                goto distribute;
        }
@@ -2738,7 +2743,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
 
        /* BR/EDR must use Secure Connections for SMP */
        if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) &&
-           !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+           !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
                return;
 
        /* If our LE support is not enabled don't do anything */
@@ -2945,11 +2950,30 @@ create_chan:
 
        l2cap_chan_set_defaults(chan);
 
-       bacpy(&chan->src, &hdev->bdaddr);
-       if (cid == L2CAP_CID_SMP)
-               chan->src_type = BDADDR_LE_PUBLIC;
-       else
+       if (cid == L2CAP_CID_SMP) {
+               /* If use of the static address is forced or if the device
+                * does not have a public address, then listen on the static
+                * address.
+                *
+                * If BR/EDR has been disabled on a dual-mode controller
+                * and a static address has been configured, then listen on
+                * the static address instead.
+                */
+               if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
+                   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+                   (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+                    bacmp(&hdev->static_addr, BDADDR_ANY))) {
+                       bacpy(&chan->src, &hdev->static_addr);
+                       chan->src_type = BDADDR_LE_RANDOM;
+               } else {
+                       bacpy(&chan->src, &hdev->bdaddr);
+                       chan->src_type = BDADDR_LE_PUBLIC;
+               }
+       } else {
+               bacpy(&chan->src, &hdev->bdaddr);
                chan->src_type = BDADDR_BREDR;
+       }
+
        chan->state = BT_LISTEN;
        chan->mode = L2CAP_MODE_BASIC;
        chan->imtu = L2CAP_DEFAULT_MTU;
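The listen-address selection for the LE SMP channel above collapses to three conditions; a hypothetical condensation (smp_use_static_addr() is not part of the patch):

    static bool smp_use_static_addr(struct hci_dev *hdev)
    {
            if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                    return true;
            if (!bacmp(&hdev->bdaddr, BDADDR_ANY))
                    return true;    /* no public address available */
            return !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
                   bacmp(&hdev->static_addr, BDADDR_ANY);
    }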
@@ -2976,21 +3000,108 @@ static void smp_del_chan(struct l2cap_chan *chan)
        l2cap_chan_put(chan);
 }
 
+static ssize_t force_bredr_smp_read(struct file *file,
+                                   char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags) ? 'Y' : 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_bredr_smp_write(struct file *file,
+                                    const char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[32];
+       size_t buf_size = min(count, (sizeof(buf)-1));
+       bool enable;
+
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+       if (strtobool(buf, &enable))
+               return -EINVAL;
+
+       if (enable == test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
+               return -EALREADY;
+
+       if (enable) {
+               struct l2cap_chan *chan;
+
+               chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR);
+               if (IS_ERR(chan))
+                       return PTR_ERR(chan);
+
+               hdev->smp_bredr_data = chan;
+       } else {
+               struct l2cap_chan *chan;
+
+               chan = hdev->smp_bredr_data;
+               hdev->smp_bredr_data = NULL;
+               smp_del_chan(chan);
+       }
+
+       change_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags);
+
+       return count;
+}
+
+static const struct file_operations force_bredr_smp_fops = {
+       .open           = simple_open,
+       .read           = force_bredr_smp_read,
+       .write          = force_bredr_smp_write,
+       .llseek         = default_llseek,
+};
+
 int smp_register(struct hci_dev *hdev)
 {
        struct l2cap_chan *chan;
 
        BT_DBG("%s", hdev->name);
 
+       /* If the controller does not support Low Energy operation, then
+        * there is also no need to register any SMP channel.
+        */
+       if (!lmp_le_capable(hdev))
+               return 0;
+
+       if (WARN_ON(hdev->smp_data)) {
+               chan = hdev->smp_data;
+               hdev->smp_data = NULL;
+               smp_del_chan(chan);
+       }
+
        chan = smp_add_cid(hdev, L2CAP_CID_SMP);
        if (IS_ERR(chan))
                return PTR_ERR(chan);
 
        hdev->smp_data = chan;
 
-       if (!lmp_sc_capable(hdev) &&
-           !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
+       /* If the controller does not support the BR/EDR Secure Connections
+        * feature, then the BR/EDR SMP channel shall not be present.
+        *
+        * To test this with Bluetooth 4.0 controllers, create a debugfs
+        * switch that allows forcing BR/EDR SMP support and accepting
+        * cross-transport pairing on non-AES encrypted connections.
+        */
+       if (!lmp_sc_capable(hdev)) {
+               debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs,
+                                   hdev, &force_bredr_smp_fops);
                return 0;
+       }
+
+       if (WARN_ON(hdev->smp_bredr_data)) {
+               chan = hdev->smp_bredr_data;
+               hdev->smp_bredr_data = NULL;
+               smp_del_chan(chan);
+       }
 
        chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR);
        if (IS_ERR(chan)) {
@@ -3317,7 +3428,7 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
        delta = ktime_sub(rettime, calltime);
        duration = (unsigned long long) ktime_to_ns(delta) >> 10;
 
-       BT_INFO("SMP test passed in %lld usecs", duration);
+       BT_INFO("SMP test passed in %llu usecs", duration);
 
        return 0;
 }
index 44425aff7cba15f93c659fd0502a7b650d050b5e..fb57ab6b24f9ef8feea780179ad2e8284a9e532f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/llc.h>
 #include <net/llc.h>
 #include <net/stp.h>
+#include <net/switchdev.h>
 
 #include "br_private.h"
 
@@ -120,6 +121,48 @@ static struct notifier_block br_device_notifier = {
        .notifier_call = br_device_event
 };
 
+static int br_netdev_switch_event(struct notifier_block *unused,
+                                 unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_switch_notifier_info_to_dev(ptr);
+       struct net_bridge_port *p;
+       struct net_bridge *br;
+       struct netdev_switch_notifier_fdb_info *fdb_info;
+       int err = NOTIFY_DONE;
+
+       rtnl_lock();
+       p = br_port_get_rtnl(dev);
+       if (!p)
+               goto out;
+
+       br = p->br;
+
+       switch (event) {
+       case NETDEV_SWITCH_FDB_ADD:
+               fdb_info = ptr;
+               err = br_fdb_external_learn_add(br, p, fdb_info->addr,
+                                               fdb_info->vid);
+               if (err)
+                       err = notifier_from_errno(err);
+               break;
+       case NETDEV_SWITCH_FDB_DEL:
+               fdb_info = ptr;
+               err = br_fdb_external_learn_del(br, p, fdb_info->addr,
+                                               fdb_info->vid);
+               if (err)
+                       err = notifier_from_errno(err);
+               break;
+       }
+
+out:
+       rtnl_unlock();
+       return err;
+}
+
+static struct notifier_block br_netdev_switch_notifier = {
+       .notifier_call = br_netdev_switch_event,
+};
+
 static void __net_exit br_net_exit(struct net *net)
 {
        struct net_device *dev;
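For context, the producer side of these events lives in switch drivers; roughly (a simplified sketch, assuming the netdev_switch notifier API of this series):

    static void example_report_learned(struct net_device *dev,
                                       const unsigned char *addr, u16 vid)
    {
            struct netdev_switch_notifier_fdb_info info = {
                    .addr = addr,
                    .vid  = vid,
            };

            /* delivered to br_netdev_switch_event() above, which adds an
             * externally learned FDB entry on the bridge port
             */
            call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD, dev,
                                         &info.info);
    }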
@@ -169,10 +212,14 @@ static int __init br_init(void)
        if (err)
                goto err_out3;
 
-       err = br_netlink_init();
+       err = register_netdev_switch_notifier(&br_netdev_switch_notifier);
        if (err)
                goto err_out4;
 
+       err = br_netlink_init();
+       if (err)
+               goto err_out5;
+
        brioctl_set(br_ioctl_deviceless_stub);
 
 #if IS_ENABLED(CONFIG_ATM_LANE)
@@ -185,6 +232,8 @@ static int __init br_init(void)
 
        return 0;
 
+err_out5:
+       unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
 err_out4:
        unregister_netdevice_notifier(&br_device_notifier);
 err_out3:
@@ -202,6 +251,7 @@ static void __exit br_deinit(void)
 {
        stp_proto_unregister(&br_stp_proto);
        br_netlink_fini();
+       unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
        unregister_netdevice_notifier(&br_device_notifier);
        brioctl_set(NULL);
        unregister_pernet_subsys(&br_net_ops);
index cc36e59db7d75203d89017e8d458b72370df33da..08bf04bdac584ddfe6329967727d5afa71dac264 100644 (file)
@@ -633,7 +633,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
        if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
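With this change fdb_fill_info() follows the now-standard netlink fill pattern: nlmsg_end() finalizes the message and the function itself reports 0 or -EMSGSIZE. A sketch of the pattern (example_fill() is illustrative):

    static int example_fill(struct sk_buff *skb, u32 portid, u32 seq)
    {
            struct nlmsghdr *nlh;

            nlh = nlmsg_put(skb, portid, seq, RTM_NEWNEIGH, 0, 0);
            if (!nlh)
                    return -EMSGSIZE;

            /* ... nla_put() attributes here ... */

            nlmsg_end(skb, nlh);
            return 0;
    }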
@@ -686,6 +687,9 @@ int br_fdb_dump(struct sk_buff *skb,
        if (!(dev->priv_flags & IFF_EBRIDGE))
                goto out;
 
+       if (!filter_dev)
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+
        for (i = 0; i < BR_HASH_SIZE; i++) {
                struct net_bridge_fdb_entry *f;
 
@@ -697,7 +701,7 @@ int br_fdb_dump(struct sk_buff *skb,
                            (!f->dst || f->dst->dev != filter_dev)) {
                                if (filter_dev != dev)
                                        goto skip;
-                               /* !f->dst is a speacial case for bridge
+                               /* !f->dst is a special case for bridge
                                 * It means the MAC belongs to the bridge
                                 * Therefore need a little more filtering
                                 * we only want to dump the !f->dst case
@@ -705,6 +709,8 @@ int br_fdb_dump(struct sk_buff *skb,
                                if (f->dst)
                                        goto skip;
                        }
+                       if (!filter_dev && f->dst)
+                               goto skip;
 
                        if (fdb_fill_info(skb, br, f,
                                          NETLINK_CB(cb->skb).portid,
@@ -985,26 +991,14 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
        }
 }
 
-int br_fdb_external_learn_add(struct net_device *dev,
+int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid)
 {
-       struct net_bridge_port *p;
-       struct net_bridge *br;
        struct hlist_head *head;
        struct net_bridge_fdb_entry *fdb;
        int err = 0;
 
-       rtnl_lock();
-
-       p = br_port_get_rtnl(dev);
-       if (!p) {
-               pr_info("bridge: %s not a bridge port\n", dev->name);
-               err = -EINVAL;
-               goto err_rtnl_unlock;
-       }
-
-       br = p->br;
-
+       ASSERT_RTNL();
        spin_lock_bh(&br->hash_lock);
 
        head = &br->hash[br_mac_hash(addr, vid)];
@@ -1029,33 +1023,18 @@ int br_fdb_external_learn_add(struct net_device *dev,
 
 err_unlock:
        spin_unlock_bh(&br->hash_lock);
-err_rtnl_unlock:
-       rtnl_unlock();
 
        return err;
 }
-EXPORT_SYMBOL(br_fdb_external_learn_add);
 
-int br_fdb_external_learn_del(struct net_device *dev,
+int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid)
 {
-       struct net_bridge_port *p;
-       struct net_bridge *br;
        struct hlist_head *head;
        struct net_bridge_fdb_entry *fdb;
        int err = 0;
 
-       rtnl_lock();
-
-       p = br_port_get_rtnl(dev);
-       if (!p) {
-               pr_info("bridge: %s not a bridge port\n", dev->name);
-               err = -EINVAL;
-               goto err_rtnl_unlock;
-       }
-
-       br = p->br;
-
+       ASSERT_RTNL();
        spin_lock_bh(&br->hash_lock);
 
        head = &br->hash[br_mac_hash(addr, vid)];
@@ -1066,9 +1045,6 @@ int br_fdb_external_learn_del(struct net_device *dev,
                err = -ENOENT;
 
        spin_unlock_bh(&br->hash_lock);
-err_rtnl_unlock:
-       rtnl_unlock();
 
        return err;
 }
-EXPORT_SYMBOL(br_fdb_external_learn_del);
index ed307db7a12b64531d0cdf2a89ccb53b7356ad4c..b087d278c6793f48f413082ff60fd8abbea49ff3 100644 (file)
@@ -424,6 +424,7 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
                features = netdev_increment_features(features,
                                                     p->dev->features, mask);
        }
+       features = netdev_add_tso_features(features, mask);
 
        return features;
 }
@@ -435,10 +436,16 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
        int err = 0;
        bool changed_addr;
 
-       /* Don't allow bridging non-ethernet like devices */
+       /* Don't allow bridging non-ethernet like devices, or DSA-enabled
+        * master network devices since the bridge layer rx_handler prevents
+        * the DSA fake ethertype handler to be invoked, so we do not strip off
+        * the DSA switch tag protocol header and the bridge layer just return
+        * RX_HANDLER_CONSUMED, stopping RX processing for these frames.
+        */
        if ((dev->flags & IFF_LOOPBACK) ||
            dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
-           !is_valid_ether_addr(dev->dev_addr))
+           !is_valid_ether_addr(dev->dev_addr) ||
+           netdev_uses_dsa(dev))
                return -EINVAL;
 
        /* No bridging of bridges */
index 1f1de715197c19ab12f5333e9a518a317754f041..e2aa7be3a847f448a404e0a43f6d1a09f1a0517a 100644 (file)
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
        dst = NULL;
 
        if (is_broadcast_ether_addr(dest)) {
-               if (p->flags & BR_PROXYARP &&
+               if (IS_ENABLED(CONFIG_INET) &&
+                   p->flags & BR_PROXYARP &&
                    skb->protocol == htons(ETH_P_ARP))
                        br_do_proxy_arp(skb, br, vid);
 
index 5df05269d17a6f9b478aa6d90d5c322f0708e0c4..409608960899630b5349bbd310200a95dc2867a2 100644 (file)
@@ -190,7 +190,8 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
 
        nla_nest_end(skb, nest2);
        nla_nest_end(skb, nest);
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 end:
        nla_nest_end(skb, nest);
@@ -276,7 +277,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net_device *dev;
        int err;
 
-       err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL);
+       err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL);
        if (err < 0)
                return err;
 
index c190d22b6b3d9e96ce91c182ea561a6c02c6e125..65728e0dc4ffd011fe87440510b76d34aa1b6ecb 100644 (file)
@@ -66,17 +66,17 @@ static int brnf_pass_vlan_indev __read_mostly = 0;
 #endif
 
 #define IS_IP(skb) \
-       (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
+       (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
 
 #define IS_IPV6(skb) \
-       (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
+       (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
 
 #define IS_ARP(skb) \
-       (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+       (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
 
 static inline __be16 vlan_proto(const struct sk_buff *skb)
 {
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                return skb->protocol;
        else if (skb->protocol == htons(ETH_P_8021Q))
                return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -436,11 +436,11 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
        struct net_device *vlan, *br;
 
        br = bridge_parent(dev);
-       if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
+       if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
                return br;
 
        vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
-                                   vlan_tx_tag_get(skb) & VLAN_VID_MASK);
+                                   skb_vlan_tag_get(skb) & VLAN_VID_MASK);
 
        return vlan ? vlan : br;
 }
index 9f5eb55a4d3a7e4b51cc3fa3fa0783a52e6f98fa..4fbcea0e7ecb41cbad57e3320d1f46eeb752f244 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/rtnetlink.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
+#include <net/switchdev.h>
 #include <uapi/linux/if_bridge.h>
 
 #include "br_private.h"
@@ -67,6 +68,120 @@ static int br_port_fill_attrs(struct sk_buff *skb,
        return 0;
 }
 
+static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
+                                   u16 vid_end, u16 flags)
+{
+       struct  bridge_vlan_info vinfo;
+
+       if ((vid_end - vid_start) > 0) {
+               /* add range to skb */
+               vinfo.vid = vid_start;
+               vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
+               if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+                           sizeof(vinfo), &vinfo))
+                       goto nla_put_failure;
+
+               vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
+
+               vinfo.vid = vid_end;
+               vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
+               if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+                           sizeof(vinfo), &vinfo))
+                       goto nla_put_failure;
+       } else {
+               vinfo.vid = vid_start;
+               vinfo.flags = flags;
+               if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+                           sizeof(vinfo), &vinfo))
+                       goto nla_put_failure;
+       }
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
+                                        const struct net_port_vlans *pv)
+{
+       u16 vid_range_start = 0, vid_range_end = 0;
+       u16 vid_range_flags = 0;
+       u16 pvid, vid, flags;
+       int err = 0;
+
+       /* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
+        * and mark vlan info with begin and end flags
+        * if vlaninfo represents a range
+        */
+       pvid = br_get_pvid(pv);
+       for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+               flags = 0;
+               if (vid == pvid)
+                       flags |= BRIDGE_VLAN_INFO_PVID;
+
+               if (test_bit(vid, pv->untagged_bitmap))
+                       flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+               if (vid_range_start == 0) {
+                       goto initvars;
+               } else if ((vid - vid_range_end) == 1 &&
+                       flags == vid_range_flags) {
+                       vid_range_end = vid;
+                       continue;
+               } else {
+                       err = br_fill_ifvlaninfo_range(skb, vid_range_start,
+                                                      vid_range_end,
+                                                      vid_range_flags);
+                       if (err)
+                               return err;
+               }
+
+initvars:
+               vid_range_start = vid;
+               vid_range_end = vid;
+               vid_range_flags = flags;
+       }
+
+       if (vid_range_start != 0) {
+               /* Call it once more to send any left over vlans */
+               err = br_fill_ifvlaninfo_range(skb, vid_range_start,
+                                              vid_range_end,
+                                              vid_range_flags);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int br_fill_ifvlaninfo(struct sk_buff *skb,
+                             const struct net_port_vlans *pv)
+{
+       struct bridge_vlan_info vinfo;
+       u16 pvid, vid;
+
+       pvid = br_get_pvid(pv);
+       for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+               vinfo.vid = vid;
+               vinfo.flags = 0;
+               if (vid == pvid)
+                       vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
+
+               if (test_bit(vid, pv->untagged_bitmap))
+                       vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+               if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+                           sizeof(vinfo), &vinfo))
+                       goto nla_put_failure;
+       }
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
 /*
  * Create one netlink message for one interface
  * Contains port and master info as well as carrier and bridge state.
@@ -121,12 +236,11 @@ static int br_fill_ifinfo(struct sk_buff *skb,
        }
 
        /* Check if  the VID information is requested */
-       if (filter_mask & RTEXT_FILTER_BRVLAN) {
-               struct nlattr *af;
+       if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
+           (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
                const struct net_port_vlans *pv;
-               struct bridge_vlan_info vinfo;
-               u16 vid;
-               u16 pvid;
+               struct nlattr *af;
+               int err;
 
                if (port)
                        pv = nbp_get_vlan_info(port);
@@ -140,26 +254,18 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                if (!af)
                        goto nla_put_failure;
 
-               pvid = br_get_pvid(pv);
-               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-                       vinfo.vid = vid;
-                       vinfo.flags = 0;
-                       if (vid == pvid)
-                               vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
-
-                       if (test_bit(vid, pv->untagged_bitmap))
-                               vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
-
-                       if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
-                                   sizeof(vinfo), &vinfo))
-                               goto nla_put_failure;
-               }
-
+               if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
+                       err = br_fill_ifvlaninfo_compressed(skb, pv);
+               else
+                       err = br_fill_ifvlaninfo(skb, pv);
+               if (err)
+                       goto nla_put_failure;
                nla_nest_end(skb, af);
        }
 
 done:
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -206,69 +312,99 @@ errout:
 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
               struct net_device *dev, u32 filter_mask)
 {
-       int err = 0;
        struct net_bridge_port *port = br_port_get_rtnl(dev);
 
-       if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
-               goto out;
+       if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
+           !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
+               return 0;
 
-       err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
-                            filter_mask, dev);
-out:
-       return err;
+       return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
+                             filter_mask, dev);
 }
 
-static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = {
-       [IFLA_BRIDGE_FLAGS]     = { .type = NLA_U16 },
-       [IFLA_BRIDGE_MODE]      = { .type = NLA_U16 },
-       [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
-                                   .len = sizeof(struct bridge_vlan_info), },
-};
+static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
+                       int cmd, struct bridge_vlan_info *vinfo)
+{
+       int err = 0;
+
+       switch (cmd) {
+       case RTM_SETLINK:
+               if (p) {
+                       err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
+                       if (err)
+                               break;
+
+                       if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
+                               err = br_vlan_add(p->br, vinfo->vid,
+                                                 vinfo->flags);
+               } else {
+                       err = br_vlan_add(br, vinfo->vid, vinfo->flags);
+               }
+               break;
+
+       case RTM_DELLINK:
+               if (p) {
+                       nbp_vlan_delete(p, vinfo->vid);
+                       if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
+                               br_vlan_delete(p->br, vinfo->vid);
+               } else {
+                       br_vlan_delete(br, vinfo->vid);
+               }
+               break;
+       }
+
+       return err;
+}
 
 static int br_afspec(struct net_bridge *br,
                     struct net_bridge_port *p,
                     struct nlattr *af_spec,
                     int cmd)
 {
-       struct nlattr *tb[IFLA_BRIDGE_MAX+1];
+       struct bridge_vlan_info *vinfo_start = NULL;
+       struct bridge_vlan_info *vinfo = NULL;
+       struct nlattr *attr;
        int err = 0;
+       int rem;
 
-       err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, af_spec, ifla_br_policy);
-       if (err)
-               return err;
+       nla_for_each_nested(attr, af_spec, rem) {
+               if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
+                       continue;
+               if (nla_len(attr) != sizeof(struct bridge_vlan_info))
+                       return -EINVAL;
+               vinfo = nla_data(attr);
+               if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
+                       if (vinfo_start)
+                               return -EINVAL;
+                       vinfo_start = vinfo;
+                       continue;
+               }
 
-       if (tb[IFLA_BRIDGE_VLAN_INFO]) {
-               struct bridge_vlan_info *vinfo;
+               if (vinfo_start) {
+                       struct bridge_vlan_info tmp_vinfo;
+                       int v;
 
-               vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
+                       if (!(vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END))
+                               return -EINVAL;
 
-               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
-                       return -EINVAL;
+                       if (vinfo->vid <= vinfo_start->vid)
+                               return -EINVAL;
 
-               switch (cmd) {
-               case RTM_SETLINK:
-                       if (p) {
-                               err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
+                       memcpy(&tmp_vinfo, vinfo_start,
+                              sizeof(struct bridge_vlan_info));
+
+                       for (v = vinfo_start->vid; v <= vinfo->vid; v++) {
+                               tmp_vinfo.vid = v;
+                               err = br_vlan_info(br, p, cmd, &tmp_vinfo);
                                if (err)
                                        break;
-
-                               if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
-                                       err = br_vlan_add(p->br, vinfo->vid,
-                                                         vinfo->flags);
-                       } else
-                               err = br_vlan_add(br, vinfo->vid, vinfo->flags);
-
-                       break;
-
-               case RTM_DELLINK:
-                       if (p) {
-                               nbp_vlan_delete(p, vinfo->vid);
-                               if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
-                                       br_vlan_delete(p->br, vinfo->vid);
-                       } else
-                               br_vlan_delete(br, vinfo->vid);
-                       break;
+                       }
+                       vinfo_start = NULL;
+               } else {
+                       err = br_vlan_info(br, p, cmd, vinfo);
                }
+               if (err)
+                       break;
        }
 
        return err;
@@ -359,13 +495,13 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
 }
 
 /* Change state and parameters on port. */
-int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
 {
        struct nlattr *protinfo;
        struct nlattr *afspec;
        struct net_bridge_port *p;
        struct nlattr *tb[IFLA_BRPORT_MAX + 1];
-       int err = 0;
+       int err = 0, ret_offload = 0;
 
        protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
        afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
@@ -407,19 +543,28 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
                                afspec, RTM_SETLINK);
        }
 
+       if (p && !(flags & BRIDGE_FLAGS_SELF)) {
+               /* set bridge attributes in hardware if supported
+                */
+               ret_offload = netdev_switch_port_bridge_setlink(dev, nlh,
+                                                               flags);
+               if (ret_offload && ret_offload != -EOPNOTSUPP)
+                       br_warn(p->br, "error setting attrs on port %u(%s)\n",
+                               (unsigned int)p->port_no, p->dev->name);
+       }
+
        if (err == 0)
                br_ifinfo_notify(RTM_NEWLINK, p);
-
 out:
        return err;
 }
 
 /* Delete port information */
-int br_dellink(struct net_device *dev, struct nlmsghdr *nlh)
+int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
 {
        struct nlattr *afspec;
        struct net_bridge_port *p;
-       int err;
+       int err = 0, ret_offload = 0;
 
        afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
        if (!afspec)
@@ -432,6 +577,21 @@ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh)
 
        err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
                        afspec, RTM_DELLINK);
+       if (err == 0)
+               /* Send RTM_NEWLINK because userspace
+                * expects RTM_NEWLINK for vlan dels
+                */
+               br_ifinfo_notify(RTM_NEWLINK, p);
+
+       if (p && !(flags & BRIDGE_FLAGS_SELF)) {
+               /* del bridge attributes in hardware
+                */
+               ret_offload = netdev_switch_port_bridge_dellink(dev, nlh,
+                                                               flags);
+               if (ret_offload && ret_offload != -EOPNOTSUPP)
+                       br_warn(p->br, "error deleting attrs on port %u (%s)\n",
+                               (unsigned int)p->port_no, p->dev->name);
+       }
 
        return err;
 }
@@ -561,7 +721,7 @@ static size_t br_get_link_af_size(const struct net_device *dev)
        return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
 }
 
-static struct rtnl_af_ops br_af_ops = {
+static struct rtnl_af_ops br_af_ops __read_mostly = {
        .family                 = AF_BRIDGE,
        .get_link_af_size       = br_get_link_af_size,
 };
index aea3d1339b3f3d248a9d5517725dbe3732c03d8e..de0919975a25318093cfa640231239f58c113f61 100644 (file)
@@ -402,6 +402,10 @@ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                struct net_device *dev, struct net_device *fdev, int idx);
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
+int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
+                             const unsigned char *addr, u16 vid);
+int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
+                             const unsigned char *addr, u16 vid);
 
 /* br_forward.c */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
@@ -628,8 +632,8 @@ static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
 {
        int err = 0;
 
-       if (vlan_tx_tag_present(skb))
-               *vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;
+       if (skb_vlan_tag_present(skb))
+               *vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
        else {
                *vid = 0;
                err = -EINVAL;
@@ -815,8 +819,8 @@ extern struct rtnl_link_ops br_link_ops;
 int br_netlink_init(void);
 void br_netlink_fini(void);
 void br_ifinfo_notify(int event, struct net_bridge_port *port);
-int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
-int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
+int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
               u32 filter_mask);
 
index 97b8ddf573634b3d7fe9a40979ea5db0c3e01377..13013fe8db246d2002944d25b61f4db612116d52 100644 (file)
@@ -187,7 +187,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
         * sent from vlan device on the bridge device, it does not have
         * HW accelerated vlan tag.
         */
-       if (unlikely(!vlan_tx_tag_present(skb) &&
+       if (unlikely(!skb_vlan_tag_present(skb) &&
                     skb->protocol == proto)) {
                skb = skb_vlan_untag(skb);
                if (unlikely(!skb))
@@ -200,7 +200,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
                        /* Protocol-mismatch, empty out vlan_tci for new tag */
                        skb_push(skb, ETH_HLEN);
                        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
-                                                       vlan_tx_tag_get(skb));
+                                                       skb_vlan_tag_get(skb));
                        if (unlikely(!skb))
                                return false;
 
index 8d3f8c7651f0dcec64bc7cac84a7f9c3d8953352..6185688881285f356f98c0ab328fd91da45b1239 100644 (file)
@@ -45,8 +45,8 @@ ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
        /* VLAN encapsulated Type/Length field, given from orig frame */
        __be16 encap;
 
-       if (vlan_tx_tag_present(skb)) {
-               TCI = vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               TCI = skb_vlan_tag_get(skb);
                encap = skb->protocol;
        } else {
                const struct vlan_hdr *fp;
index d9a8c05d995d14466d2cef02fcd571d4ad009272..91180a7fc94376ea3ca7eecf274c03c3bc919590 100644 (file)
@@ -133,7 +133,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
        __be16 ethproto;
        int verdict, i;
 
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                ethproto = htons(ETH_P_8021Q);
        else
                ethproto = h->h_proto;
index b0330aecbf974e10ec8f64d3e93b012be5d689be..3244aead09267dd77b0392a0aac7afa462593305 100644 (file)
@@ -265,22 +265,12 @@ out:
        data[NFT_REG_VERDICT].verdict = NF_DROP;
 }
 
-static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
+static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+                                     const struct nft_expr *expr,
+                                     const struct nft_data **data)
 {
-       struct nft_base_chain *basechain;
-
-       if (chain->flags & NFT_BASE_CHAIN) {
-               basechain = nft_base_chain(chain);
-
-               switch (basechain->ops[0].hooknum) {
-               case NF_BR_PRE_ROUTING:
-               case NF_BR_LOCAL_IN:
-                       break;
-               default:
-                       return -EOPNOTSUPP;
-               }
-       }
-       return 0;
+       return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
+                                                   (1 << NF_BR_LOCAL_IN));
 }
 
 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
@@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
        struct nft_reject *priv = nft_expr_priv(expr);
        int icmp_code, err;
 
-       err = nft_reject_bridge_validate_hooks(ctx->chain);
+       err = nft_reject_bridge_validate(ctx, expr, NULL);
        if (err < 0)
                return err;
 
@@ -341,13 +331,6 @@ nla_put_failure:
        return -1;
 }
 
-static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
-                                     const struct nft_expr *expr,
-                                     const struct nft_data **data)
-{
-       return nft_reject_bridge_validate_hooks(ctx->chain);
-}
-
 static struct nft_expr_type nft_reject_bridge_type;
 static const struct nft_expr_ops nft_reject_bridge_ops = {
        .type           = &nft_reject_bridge_type,
index 4589ff67bfa95f2ab5ce6c6ccccd48f970a21807..67a4a36febd1a6bf554a36b4690272777cb008a5 100644 (file)
@@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
        ASSERT_RTNL();
        caifdev = netdev_priv(dev);
        caif_netlink_parms(data, &caifdev->conn_req);
-       dev_net_set(caifdev->netdev, src_net);
 
        ret = register_netdevice(dev);
        if (ret)
index 295f62e62eb34bf4050eb0657c48426e583b65b4..a6f448e18ea8c97e28f02a5a50805d9629c6c7f6 100644 (file)
@@ -575,7 +575,8 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
                        goto cancel;
        }
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 cancel:
        nlmsg_cancel(skb, nlh);
index 15845814a0f25eaefb95590cb848aa6032a8e038..ba6eb17226da424d59bb462a2089186bbd3233af 100644 (file)
@@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au,
        int ret;
        char tmp_enc[40];
        __le32 tmp[5] = {
-               16u, msg->hdr.crc, msg->footer.front_crc,
+               cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
                msg->footer.middle_crc, msg->footer.data_crc,
        };
        ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
index a83062ceeec90660ee5b384fc2a758a70bbf1049..f2148e22b14897727faeba297e045f2b933a52b1 100644 (file)
@@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len,
        if (src_len != sizeof(u32) + dst_len)
                return -EINVAL;
 
-       buf_len = le32_to_cpu(*(u32 *)src);
+       buf_len = le32_to_cpu(*(__le32 *)src);
        if (buf_len != dst_len)
                return -EINVAL;
 
index 235e6c50708d73baf290679c45affc564202042b..fec0856dd6c031a2ae369410fc5d7f9c25a1fcf6 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the Linux networking core.
 #
 
-obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
+obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \
         gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
index 683d493aa1bf2225ac0c029ac403841f7d3c740e..8be38675e1a861eb84090e1960dfe44e5fa17ed0 100644 (file)
@@ -371,9 +371,10 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 static inline struct list_head *ptype_head(const struct packet_type *pt)
 {
        if (pt->type == htons(ETH_P_ALL))
-               return &ptype_all;
+               return pt->dev ? &pt->dev->ptype_all : &ptype_all;
        else
-               return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+               return pt->dev ? &pt->dev->ptype_specific :
+                                &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 }
 
 /**
@@ -1734,6 +1735,23 @@ static inline int deliver_skb(struct sk_buff *skb,
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
+static inline void deliver_ptype_list_skb(struct sk_buff *skb,
+                                         struct packet_type **pt,
+                                         struct net_device *dev, __be16 type,
+                                         struct list_head *ptype_list)
+{
+       struct packet_type *ptype, *pt_prev = *pt;
+
+       list_for_each_entry_rcu(ptype, ptype_list, list) {
+               if (ptype->type != type)
+                       continue;
+               if (pt_prev)
+                       deliver_skb(skb, pt_prev, dev);
+               pt_prev = ptype;
+       }
+       *pt = pt_prev;
+}
+
 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
 {
        if (!ptype->af_packet_priv || !skb->sk)
@@ -1757,45 +1775,54 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
        struct packet_type *ptype;
        struct sk_buff *skb2 = NULL;
        struct packet_type *pt_prev = NULL;
+       struct list_head *ptype_list = &ptype_all;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(ptype, &ptype_all, list) {
+again:
+       list_for_each_entry_rcu(ptype, ptype_list, list) {
                /* Never send packets back to the socket
                 * they originated from - MvS (miquels@drinkel.ow.org)
                 */
-               if ((ptype->dev == dev || !ptype->dev) &&
-                   (!skb_loop_sk(ptype, skb))) {
-                       if (pt_prev) {
-                               deliver_skb(skb2, pt_prev, skb->dev);
-                               pt_prev = ptype;
-                               continue;
-                       }
+               if (skb_loop_sk(ptype, skb))
+                       continue;
 
-                       skb2 = skb_clone(skb, GFP_ATOMIC);
-                       if (!skb2)
-                               break;
+               if (pt_prev) {
+                       deliver_skb(skb2, pt_prev, skb->dev);
+                       pt_prev = ptype;
+                       continue;
+               }
 
-                       net_timestamp_set(skb2);
+               /* need to clone skb, done only once */
+               skb2 = skb_clone(skb, GFP_ATOMIC);
+               if (!skb2)
+                       goto out_unlock;
 
-                       /* skb->nh should be correctly
-                          set by sender, so that the second statement is
-                          just protection against buggy protocols.
-                        */
-                       skb_reset_mac_header(skb2);
-
-                       if (skb_network_header(skb2) < skb2->data ||
-                           skb_network_header(skb2) > skb_tail_pointer(skb2)) {
-                               net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
-                                                    ntohs(skb2->protocol),
-                                                    dev->name);
-                               skb_reset_network_header(skb2);
-                       }
+               net_timestamp_set(skb2);
 
-                       skb2->transport_header = skb2->network_header;
-                       skb2->pkt_type = PACKET_OUTGOING;
-                       pt_prev = ptype;
+               /* skb->nh should be correctly
+                * set by sender, so that the second statement is
+                * just protection against buggy protocols.
+                */
+               skb_reset_mac_header(skb2);
+
+               if (skb_network_header(skb2) < skb2->data ||
+                   skb_network_header(skb2) > skb_tail_pointer(skb2)) {
+                       net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
+                                            ntohs(skb2->protocol),
+                                            dev->name);
+                       skb_reset_network_header(skb2);
                }
+
+               skb2->transport_header = skb2->network_header;
+               skb2->pkt_type = PACKET_OUTGOING;
+               pt_prev = ptype;
        }
+
+       if (ptype_list == &ptype_all) {
+               ptype_list = &dev->ptype_all;
+               goto again;
+       }
+out_unlock:
        if (pt_prev)
                pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
        rcu_read_unlock();
@@ -2352,7 +2379,6 @@ EXPORT_SYMBOL(skb_checksum_help);
 
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
-       unsigned int vlan_depth = skb->mac_len;
        __be16 type = skb->protocol;
 
        /* Tunnel gso handlers can set protocol to ethernet. */
@@ -2366,35 +2392,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
                type = eth->h_proto;
        }
 
-       /* if skb->protocol is 802.1Q/AD then the header should already be
-        * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
-        * ETH_HLEN otherwise
-        */
-       if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
-               if (vlan_depth) {
-                       if (WARN_ON(vlan_depth < VLAN_HLEN))
-                               return 0;
-                       vlan_depth -= VLAN_HLEN;
-               } else {
-                       vlan_depth = ETH_HLEN;
-               }
-               do {
-                       struct vlan_hdr *vh;
-
-                       if (unlikely(!pskb_may_pull(skb,
-                                                   vlan_depth + VLAN_HLEN)))
-                               return 0;
-
-                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
-                       type = vh->h_vlan_encapsulated_proto;
-                       vlan_depth += VLAN_HLEN;
-               } while (type == htons(ETH_P_8021Q) ||
-                        type == htons(ETH_P_8021AD));
-       }
-
-       *depth = vlan_depth;
-
-       return type;
+       return __vlan_get_protocol(skb, type, depth);
 }
 
 /**
@@ -2578,7 +2576,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
        if (skb->encapsulation)
                features &= dev->hw_enc_features;
 
-       if (!vlan_tx_tag_present(skb)) {
+       if (!skb_vlan_tag_present(skb)) {
                if (unlikely(protocol == htons(ETH_P_8021Q) ||
                             protocol == htons(ETH_P_8021AD))) {
                        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2617,7 +2615,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
        unsigned int len;
        int rc;
 
-       if (!list_empty(&ptype_all))
+       if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
                dev_queue_xmit_nit(skb, dev);
 
        len = skb->len;
@@ -2659,7 +2657,7 @@ out:
 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
                                          netdev_features_t features)
 {
-       if (vlan_tx_tag_present(skb) &&
+       if (skb_vlan_tag_present(skb) &&
            !vlan_hw_offload_capable(features, skb->vlan_proto))
                skb = __vlan_hwaccel_push_inside(skb);
        return skb;
@@ -3032,6 +3030,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 /* One global table that all flow-based protocols share. */
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
+u32 rps_cpu_mask __read_mostly;
+EXPORT_SYMBOL(rps_cpu_mask);
 
 struct static_key rps_needed __read_mostly;
 
@@ -3088,16 +3088,17 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                       struct rps_dev_flow **rflowp)
 {
-       struct netdev_rx_queue *rxqueue;
-       struct rps_map *map;
+       const struct rps_sock_flow_table *sock_flow_table;
+       struct netdev_rx_queue *rxqueue = dev->_rx;
        struct rps_dev_flow_table *flow_table;
-       struct rps_sock_flow_table *sock_flow_table;
+       struct rps_map *map;
        int cpu = -1;
-       u16 tcpu;
+       u32 tcpu;
        u32 hash;
 
        if (skb_rx_queue_recorded(skb)) {
                u16 index = skb_get_rx_queue(skb);
+
                if (unlikely(index >= dev->real_num_rx_queues)) {
                        WARN_ONCE(dev->real_num_rx_queues > 1,
                                  "%s received packet on queue %u, but number "
@@ -3105,39 +3106,40 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                                  dev->name, index, dev->real_num_rx_queues);
                        goto done;
                }
-               rxqueue = dev->_rx + index;
-       } else
-               rxqueue = dev->_rx;
+               rxqueue += index;
+       }
 
+       /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
+
+       flow_table = rcu_dereference(rxqueue->rps_flow_table);
        map = rcu_dereference(rxqueue->rps_map);
-       if (map) {
-               if (map->len == 1 &&
-                   !rcu_access_pointer(rxqueue->rps_flow_table)) {
-                       tcpu = map->cpus[0];
-                       if (cpu_online(tcpu))
-                               cpu = tcpu;
-                       goto done;
-               }
-       } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
+       if (!flow_table && !map)
                goto done;
-       }
 
        skb_reset_network_header(skb);
        hash = skb_get_hash(skb);
        if (!hash)
                goto done;
 
-       flow_table = rcu_dereference(rxqueue->rps_flow_table);
        sock_flow_table = rcu_dereference(rps_sock_flow_table);
        if (flow_table && sock_flow_table) {
-               u16 next_cpu;
                struct rps_dev_flow *rflow;
+               u32 next_cpu;
+               u32 ident;
 
+               /* First check into global flow table if there is a match */
+               ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+               if ((ident ^ hash) & ~rps_cpu_mask)
+                       goto try_rps;
+
+               next_cpu = ident & rps_cpu_mask;
+
+               /* OK, now we know there is a match,
+                * we can look at the local (per receive queue) flow table
+                */
                rflow = &flow_table->flows[hash & flow_table->mask];
                tcpu = rflow->cpu;
 
-               next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
-
                /*
                 * If the desired CPU (where last recvmsg was done) is
                 * different from current CPU (one in the rx-queue flow
@@ -3164,6 +3166,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                }
        }
 
+try_rps:
+
        if (map) {
                tcpu = map->cpus[reciprocal_scale(hash, map->len)];
                if (cpu_online(tcpu)) {
@@ -3615,7 +3619,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
        struct packet_type *ptype, *pt_prev;
        rx_handler_func_t *rx_handler;
        struct net_device *orig_dev;
-       struct net_device *null_or_dev;
        bool deliver_exact = false;
        int ret = NET_RX_DROP;
        __be16 type;
@@ -3658,11 +3661,15 @@ another_round:
                goto skip_taps;
 
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
-               if (!ptype->dev || ptype->dev == skb->dev) {
-                       if (pt_prev)
-                               ret = deliver_skb(skb, pt_prev, orig_dev);
-                       pt_prev = ptype;
-               }
+               if (pt_prev)
+                       ret = deliver_skb(skb, pt_prev, orig_dev);
+               pt_prev = ptype;
+       }
+
+       list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
+               if (pt_prev)
+                       ret = deliver_skb(skb, pt_prev, orig_dev);
+               pt_prev = ptype;
        }
 
 skip_taps:
@@ -3676,7 +3683,7 @@ ncls:
        if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
                goto drop;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                if (pt_prev) {
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = NULL;
@@ -3708,8 +3715,8 @@ ncls:
                }
        }
 
-       if (unlikely(vlan_tx_tag_present(skb))) {
-               if (vlan_tx_tag_get_id(skb))
+       if (unlikely(skb_vlan_tag_present(skb))) {
+               if (skb_vlan_tag_get_id(skb))
                        skb->pkt_type = PACKET_OTHERHOST;
                /* Note: we might in the future use prio bits
                 * and set skb->priority like in vlan_do_receive()
@@ -3718,19 +3725,21 @@ ncls:
                skb->vlan_tci = 0;
        }
 
+       type = skb->protocol;
+
        /* deliver only exact match when indicated */
-       null_or_dev = deliver_exact ? skb->dev : NULL;
+       if (likely(!deliver_exact)) {
+               deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
+                                      &ptype_base[ntohs(type) &
+                                                  PTYPE_HASH_MASK]);
+       }
 
-       type = skb->protocol;
-       list_for_each_entry_rcu(ptype,
-                       &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-               if (ptype->type == type &&
-                   (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
-                    ptype->dev == orig_dev)) {
-                       if (pt_prev)
-                               ret = deliver_skb(skb, pt_prev, orig_dev);
-                       pt_prev = ptype;
-               }
+       deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
+                              &orig_dev->ptype_specific);
+
+       if (unlikely(skb->dev != orig_dev)) {
+               deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
+                                      &skb->dev->ptype_specific);
        }
 
        if (pt_prev) {
@@ -5323,7 +5332,27 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
-void netdev_adjacent_add_links(struct net_device *dev)
+/**
+ * netdev_bonding_info_change - Dispatch event about slave change
+ * @dev: device
+ * @netdev_bonding_info: info to dispatch
+ *
+ * Send NETDEV_BONDING_INFO to netdev notifiers with info.
+ * The caller must hold the RTNL lock.
+ */
+void netdev_bonding_info_change(struct net_device *dev,
+                               struct netdev_bonding_info *bonding_info)
+{
+       struct netdev_notifier_bonding_info     info;
+
+       memcpy(&info.bonding_info, bonding_info,
+              sizeof(struct netdev_bonding_info));
+       call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
+                                     &info.info);
+}
+EXPORT_SYMBOL(netdev_bonding_info_change);
+
+static void netdev_adjacent_add_links(struct net_device *dev)
 {
        struct netdev_adjacent *iter;
 
@@ -5348,7 +5377,7 @@ void netdev_adjacent_add_links(struct net_device *dev)
        }
 }
 
-void netdev_adjacent_del_links(struct net_device *dev)
+static void netdev_adjacent_del_links(struct net_device *dev)
 {
        struct netdev_adjacent *iter;
 
@@ -6172,13 +6201,16 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 {
        unsigned int i, count = dev->num_rx_queues;
        struct netdev_rx_queue *rx;
+       size_t sz = count * sizeof(*rx);
 
        BUG_ON(count < 1);
 
-       rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
-       if (!rx)
-               return -ENOMEM;
-
+       rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+       if (!rx) {
+               rx = vzalloc(sz);
+               if (!rx)
+                       return -ENOMEM;
+       }
        dev->_rx = rx;
 
        for (i = 0; i < count; i++)
@@ -6576,6 +6608,8 @@ void netdev_run_todo(void)
 
                /* paranoia */
                BUG_ON(netdev_refcnt_read(dev));
+               BUG_ON(!list_empty(&dev->ptype_all));
+               BUG_ON(!list_empty(&dev->ptype_specific));
                WARN_ON(rcu_access_pointer(dev->ip_ptr));
                WARN_ON(rcu_access_pointer(dev->ip6_ptr));
                WARN_ON(dev->dn_ptr);
@@ -6656,7 +6690,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
        if (!queue)
                return NULL;
        netdev_init_one_queue(dev, queue, NULL);
-       queue->qdisc = &noop_qdisc;
+       RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
        queue->qdisc_sleeping = &noop_qdisc;
        rcu_assign_pointer(dev->ingress_queue, queue);
 #endif
@@ -6758,6 +6792,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        INIT_LIST_HEAD(&dev->adj_list.lower);
        INIT_LIST_HEAD(&dev->all_adj_list.upper);
        INIT_LIST_HEAD(&dev->all_adj_list.lower);
+       INIT_LIST_HEAD(&dev->ptype_all);
+       INIT_LIST_HEAD(&dev->ptype_specific);
        dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
        setup(dev);
 
@@ -6808,7 +6844,7 @@ void free_netdev(struct net_device *dev)
 
        netif_free_tx_queues(dev);
 #ifdef CONFIG_SYSFS
-       kfree(dev->_rx);
+       kvfree(dev->_rx);
 #endif
 
        kfree(rcu_dereference_protected(dev->ingress_queue, 1));
@@ -7072,10 +7108,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                oldsd->output_queue = NULL;
                oldsd->output_queue_tailp = &oldsd->output_queue;
        }
-       /* Append NAPI poll list from offline CPU. */
-       if (!list_empty(&oldsd->poll_list)) {
-               list_splice_init(&oldsd->poll_list, &sd->poll_list);
-               raise_softirq_irqoff(NET_RX_SOFTIRQ);
+       /* Append NAPI poll list from offline CPU, with one exception :
+        * process_backlog() must be called by cpu owning percpu backlog.
+        * We properly handle process_queue & input_pkt_queue later.
+        */
+       while (!list_empty(&oldsd->poll_list)) {
+               struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
+                                                           struct napi_struct,
+                                                           poll_list);
+
+               list_del_init(&napi->poll_list);
+               if (napi->poll == process_backlog)
+                       napi->state = 0;
+               else
+                       ____napi_schedule(sd, napi);
        }
 
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -7086,7 +7132,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                netif_rx_internal(skb);
                input_queue_head_incr(oldsd);
        }
-       while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+       while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
                netif_rx_internal(skb);
                input_queue_head_incr(oldsd);
        }
index 550892cd6b3ff4ec1325bc1094bb9f217409c7e2..91f74f3eb20475439214d9692f9b17c8124a9c3a 100644 (file)
@@ -1597,20 +1597,31 @@ static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
        return err;
 }
 
+static int __ethtool_get_module_info(struct net_device *dev,
+                                    struct ethtool_modinfo *modinfo)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+       struct phy_device *phydev = dev->phydev;
+
+       if (phydev && phydev->drv && phydev->drv->module_info)
+               return phydev->drv->module_info(phydev, modinfo);
+
+       if (ops->get_module_info)
+               return ops->get_module_info(dev, modinfo);
+
+       return -EOPNOTSUPP;
+}
+
 static int ethtool_get_module_info(struct net_device *dev,
                                   void __user *useraddr)
 {
        int ret;
        struct ethtool_modinfo modinfo;
-       const struct ethtool_ops *ops = dev->ethtool_ops;
-
-       if (!ops->get_module_info)
-               return -EOPNOTSUPP;
 
        if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
                return -EFAULT;
 
-       ret = ops->get_module_info(dev, &modinfo);
+       ret = __ethtool_get_module_info(dev, &modinfo);
        if (ret)
                return ret;
 
@@ -1620,21 +1631,33 @@ static int ethtool_get_module_info(struct net_device *dev,
        return 0;
 }
 
+static int __ethtool_get_module_eeprom(struct net_device *dev,
+                                      struct ethtool_eeprom *ee, u8 *data)
+{
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+       struct phy_device *phydev = dev->phydev;
+
+       if (phydev && phydev->drv && phydev->drv->module_eeprom)
+               return phydev->drv->module_eeprom(phydev, ee, data);
+
+       if (ops->get_module_eeprom)
+               return ops->get_module_eeprom(dev, ee, data);
+
+       return -EOPNOTSUPP;
+}
+
 static int ethtool_get_module_eeprom(struct net_device *dev,
                                     void __user *useraddr)
 {
        int ret;
        struct ethtool_modinfo modinfo;
-       const struct ethtool_ops *ops = dev->ethtool_ops;
-
-       if (!ops->get_module_info || !ops->get_module_eeprom)
-               return -EOPNOTSUPP;
 
-       ret = ops->get_module_info(dev, &modinfo);
+       ret = __ethtool_get_module_info(dev, &modinfo);
        if (ret)
                return ret;
 
-       return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom,
+       return ethtool_get_any_eeprom(dev, useraddr,
+                                     __ethtool_get_module_eeprom,
                                      modinfo.eeprom_len);
 }
 
index 185c341fafbd079714fe3a563b8209d5c5f7ead4..44706e81b2e03df3e9d39c1cd76879a4ede48d1e 100644 (file)
@@ -609,7 +609,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
        if (ops->fill(rule, skb, frh) < 0)
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
index 45084938c403b82b23d45ed66d9871348baeadc7..2c35c02a931e227fa368cd346873596d4b037a3d 100644 (file)
@@ -178,6 +178,20 @@ ipv6:
                        return false;
                }
        }
+       case htons(ETH_P_TIPC): {
+               struct {
+                       __be32 pre[3];
+                       __be32 srcnode;
+               } *hdr, _hdr;
+               hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
+               if (!hdr)
+                       return false;
+               flow->src = hdr->srcnode;
+               flow->dst = 0;
+               flow->n_proto = proto;
+               flow->thoff = (u16)nhoff;
+               return true;
+       }
        case htons(ETH_P_FCOE):
                flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
                /* fall through */
@@ -408,7 +422,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                map = rcu_dereference(
-                   dev_maps->cpu_map[raw_smp_processor_id()]);
+                   dev_maps->cpu_map[skb->sender_cpu - 1]);
                if (map) {
                        if (map->len == 1)
                                queue_index = map->queues[0];
@@ -454,6 +468,11 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 {
        int queue_index = 0;
 
+#ifdef CONFIG_XPS
+       if (skb->sender_cpu == 0)
+               skb->sender_cpu = raw_smp_processor_id() + 1;
+#endif
+
        if (dev->real_num_tx_queues != 1) {
                const struct net_device_ops *ops = dev->netdev_ops;
                if (ops->ndo_select_queue)
diff --git a/net/core/iovec.c b/net/core/iovec.c
deleted file mode 100644 (file)
index dcbe98b..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- *     iovec manipulation routines.
- *
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- *
- *     Fixes:
- *             Andrew Lunn     :       Errors in iovec copying.
- *             Pedro Roque     :       Added memcpy_fromiovecend and
- *                                     csum_..._fromiovecend.
- *             Andi Kleen      :       fixed error handling for 2.1
- *             Alexey Kuznetsov:       2.1 optimisations
- *             Andi Kleen      :       Fix csum*fromiovecend for IPv6.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/in6.h>
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-#include <net/checksum.h>
-#include <net/sock.h>
-
-/*
- *     And now for the all-in-one: copy and checksum from a user iovec
- *     directly to a datagram
- *     Calls to csum_partial but the last must be in 32 bit chunks
- *
- *     ip_build_xmit must ensure that when fragmenting only the last
- *     call to this function will be unaligned also.
- */
-int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
-                                int offset, unsigned int len, __wsum *csump)
-{
-       __wsum csum = *csump;
-       int partial_cnt = 0, err = 0;
-
-       /* Skip over the finished iovecs */
-       while (offset >= iov->iov_len) {
-               offset -= iov->iov_len;
-               iov++;
-       }
-
-       while (len > 0) {
-               u8 __user *base = iov->iov_base + offset;
-               int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-               offset = 0;
-
-               /* There is a remnant from previous iov. */
-               if (partial_cnt) {
-                       int par_len = 4 - partial_cnt;
-
-                       /* iov component is too short ... */
-                       if (par_len > copy) {
-                               if (copy_from_user(kdata, base, copy))
-                                       goto out_fault;
-                               kdata += copy;
-                               base += copy;
-                               partial_cnt += copy;
-                               len -= copy;
-                               iov++;
-                               if (len)
-                                       continue;
-                               *csump = csum_partial(kdata - partial_cnt,
-                                                        partial_cnt, csum);
-                               goto out;
-                       }
-                       if (copy_from_user(kdata, base, par_len))
-                               goto out_fault;
-                       csum = csum_partial(kdata - partial_cnt, 4, csum);
-                       kdata += par_len;
-                       base  += par_len;
-                       copy  -= par_len;
-                       len   -= par_len;
-                       partial_cnt = 0;
-               }
-
-               if (len > copy) {
-                       partial_cnt = copy % 4;
-                       if (partial_cnt) {
-                               copy -= partial_cnt;
-                               if (copy_from_user(kdata + copy, base + copy,
-                                               partial_cnt))
-                                       goto out_fault;
-                       }
-               }
-
-               if (copy) {
-                       csum = csum_and_copy_from_user(base, kdata, copy,
-                                                       csum, &err);
-                       if (err)
-                               goto out;
-               }
-               len   -= copy + partial_cnt;
-               kdata += copy + partial_cnt;
-               iov++;
-       }
-       *csump = csum;
-out:
-       return err;
-
-out_fault:
-       err = -EFAULT;
-       goto out;
-}
-EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
-
-unsigned long iov_pages(const struct iovec *iov, int offset,
-                       unsigned long nr_segs)
-{
-       unsigned long seg, base;
-       int pages = 0, len, size;
-
-       while (nr_segs && (offset >= iov->iov_len)) {
-               offset -= iov->iov_len;
-               ++iov;
-               --nr_segs;
-       }
-
-       for (seg = 0; seg < nr_segs; seg++) {
-               base = (unsigned long)iov[seg].iov_base + offset;
-               len = iov[seg].iov_len - offset;
-               size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
-               pages += size;
-               offset = 0;
-       }
-
-       return pages;
-}
-EXPORT_SYMBOL(iov_pages);
index 8e38f17288d3c5a475471b0e56e339d9b6d5bf9e..70fe9e10ac867f495086810dc6ea619f69d59368 100644 (file)
@@ -1884,7 +1884,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                goto nla_put_failure;
 
        read_unlock_bh(&tbl->lock);
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        read_unlock_bh(&tbl->lock);
@@ -1917,7 +1918,8 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
                goto errout;
 
        read_unlock_bh(&tbl->lock);
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 errout:
        read_unlock_bh(&tbl->lock);
        nlmsg_cancel(skb, nlh);
@@ -2043,6 +2045,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
                        case NDTPA_BASE_REACHABLE_TIME:
                                NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
                                              nla_get_msecs(tbp[i]));
+                               /* update reachable_time as well, otherwise, the change will
+                                * only be effective after the next time neigh_periodic_work
+                                * decides to recompute it (can be multiple minutes)
+                                */
+                               p->reachable_time =
+                                       neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
                                break;
                        case NDTPA_GC_STALETIME:
                                NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2120,7 +2128,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 
                if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
-                                      NLM_F_MULTI) <= 0)
+                                      NLM_F_MULTI) < 0)
                        break;
 
                nidx = 0;
@@ -2136,7 +2144,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                                                     NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
                                                     RTM_NEWNEIGHTBL,
-                                                    NLM_F_MULTI) <= 0)
+                                                    NLM_F_MULTI) < 0)
                                goto out;
                next:
                        nidx++;
@@ -2196,7 +2204,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
            nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -2226,7 +2235,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
        if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -2264,7 +2274,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                        if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            RTM_NEWNEIGH,
-                                           NLM_F_MULTI) <= 0) {
+                                           NLM_F_MULTI) < 0) {
                                rc = -1;
                                goto out;
                        }
@@ -2301,7 +2311,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                        if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            RTM_NEWNEIGH,
-                                           NLM_F_MULTI, tbl) <= 0) {
+                                           NLM_F_MULTI, tbl) < 0) {
                                read_unlock_bh(&tbl->lock);
                                rc = -1;
                                goto out;
@@ -2921,6 +2931,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
        return ret;
 }
 
+static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+                                         void __user *buffer,
+                                         size_t *lenp, loff_t *ppos)
+{
+       struct neigh_parms *p = ctl->extra2;
+       int ret;
+
+       if (strcmp(ctl->procname, "base_reachable_time") == 0)
+               ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+       else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
+               ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+       else
+               ret = -1;
+
+       if (write && ret == 0) {
+               /* update reachable_time as well, otherwise, the change will
+                * only be effective after the next time neigh_periodic_work
+                * decides to recompute it
+                */
+               p->reachable_time =
+                       neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+       }
+       return ret;
+}
+
 #define NEIGH_PARMS_DATA_OFFSET(index) \
        (&((struct neigh_parms *) 0)->data[index])
 
@@ -3047,6 +3082,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
                /* ReachableTime (in milliseconds) */
                t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+       } else {
+               /* Those handlers will update p->reachable_time after
+                * base_reachable_time(_ms) is set to ensure the new timer starts being
+                * applied after the next neighbour update instead of waiting for
+                * neigh_periodic_work to update its value (can be multiple minutes)
+                * So any handler that replaces them should do this as well
+                */
+               /* ReachableTime */
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
+                       neigh_proc_base_reachable_time;
+               /* ReachableTime (in milliseconds) */
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
+                       neigh_proc_base_reachable_time;
        }
 
        /* Don't export sysctls to unprivileged users */
index ce780c722e48ca2bf35534895ef0b7dc0a9541d5..cb5290b8c428c5c348b25d842ab9cf3797b70eba 100644 (file)
 #include <linux/file.h>
 #include <linux/export.h>
 #include <linux/user_namespace.h>
+#include <linux/net_namespace.h>
+#include <linux/rtnetlink.h>
+#include <net/sock.h>
+#include <net/netlink.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
@@ -144,6 +148,78 @@ static void ops_free_list(const struct pernet_operations *ops,
        }
 }
 
+static int alloc_netid(struct net *net, struct net *peer, int reqid)
+{
+       int min = 0, max = 0;
+
+       ASSERT_RTNL();
+
+       if (reqid >= 0) {
+               min = reqid;
+               max = reqid + 1;
+       }
+
+       return idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
+}
+
+/* This function is used by idr_for_each(). If net is equal to peer, the
+ * function returns the id so that idr_for_each() stops. Because we cannot
+ * returns the id 0 (idr_for_each() will not stop), we return the magic value
+ * NET_ID_ZERO (-1) for it.
+ */
+#define NET_ID_ZERO -1
+static int net_eq_idr(int id, void *net, void *peer)
+{
+       if (net_eq(net, peer))
+               return id ? : NET_ID_ZERO;
+       return 0;
+}
+
+static int __peernet2id(struct net *net, struct net *peer, bool alloc)
+{
+       int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
+
+       ASSERT_RTNL();
+
+       /* Magic value for id 0. */
+       if (id == NET_ID_ZERO)
+               return 0;
+       if (id > 0)
+               return id;
+
+       if (alloc)
+               return alloc_netid(net, peer, -1);
+
+       return -ENOENT;
+}
+
+/* This function returns the id of a peer netns. If no id is assigned, one will
+ * be allocated and returned.
+ */
+int peernet2id(struct net *net, struct net *peer)
+{
+       int id = __peernet2id(net, peer, true);
+
+       return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
+}
+EXPORT_SYMBOL(peernet2id);
+
+struct net *get_net_ns_by_id(struct net *net, int id)
+{
+       struct net *peer;
+
+       if (id < 0)
+               return NULL;
+
+       rcu_read_lock();
+       peer = idr_find(&net->netns_ids, id);
+       if (peer)
+               get_net(peer);
+       rcu_read_unlock();
+
+       return peer;
+}
+
 /*
  * setup_net runs the initializers for the network namespace object.
  */
@@ -158,6 +234,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
        atomic_set(&net->passive, 1);
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
+       idr_init(&net->netns_ids);
 
 #ifdef NETNS_REFCNT_DEBUG
        atomic_set(&net->use_count, 0);
@@ -288,6 +365,14 @@ static void cleanup_net(struct work_struct *work)
        list_for_each_entry(net, &net_kill_list, cleanup_list) {
                list_del_rcu(&net->list);
                list_add_tail(&net->exit_list, &net_exit_list);
+               for_each_net(tmp) {
+                       int id = __peernet2id(tmp, net, false);
+
+                       if (id >= 0)
+                               idr_remove(&tmp->netns_ids, id);
+               }
+               idr_destroy(&net->netns_ids);
+
        }
        rtnl_unlock();
 
@@ -361,6 +446,7 @@ struct net *get_net_ns_by_fd(int fd)
        return ERR_PTR(-EINVAL);
 }
 #endif
+EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
 
 struct net *get_net_ns_by_pid(pid_t pid)
 {
@@ -402,6 +488,130 @@ static struct pernet_operations __net_initdata net_ns_ops = {
        .exit = net_ns_net_exit,
 };
 
+static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
+       [NETNSA_NONE]           = { .type = NLA_UNSPEC },
+       [NETNSA_NSID]           = { .type = NLA_S32 },
+       [NETNSA_PID]            = { .type = NLA_U32 },
+       [NETNSA_FD]             = { .type = NLA_U32 },
+};
+
+static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nlattr *tb[NETNSA_MAX + 1];
+       struct net *peer;
+       int nsid, err;
+
+       err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+                         rtnl_net_policy);
+       if (err < 0)
+               return err;
+       if (!tb[NETNSA_NSID])
+               return -EINVAL;
+       nsid = nla_get_s32(tb[NETNSA_NSID]);
+
+       if (tb[NETNSA_PID])
+               peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
+       else if (tb[NETNSA_FD])
+               peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
+       else
+               return -EINVAL;
+       if (IS_ERR(peer))
+               return PTR_ERR(peer);
+
+       if (__peernet2id(net, peer, false) >= 0) {
+               err = -EEXIST;
+               goto out;
+       }
+
+       err = alloc_netid(net, peer, nsid);
+       if (err > 0)
+               err = 0;
+out:
+       put_net(peer);
+       return err;
+}
+
+static int rtnl_net_get_size(void)
+{
+       return NLMSG_ALIGN(sizeof(struct rtgenmsg))
+              + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
+              ;
+}
+
+static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
+                        int cmd, struct net *net, struct net *peer)
+{
+       struct nlmsghdr *nlh;
+       struct rtgenmsg *rth;
+       int id;
+
+       ASSERT_RTNL();
+
+       nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       rth = nlmsg_data(nlh);
+       rth->rtgen_family = AF_UNSPEC;
+
+       id = __peernet2id(net, peer, false);
+       if  (id < 0)
+               id = NETNSA_NSID_NOT_ASSIGNED;
+       if (nla_put_s32(skb, NETNSA_NSID, id))
+               goto nla_put_failure;
+
+       nlmsg_end(skb, nlh);
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nlattr *tb[NETNSA_MAX + 1];
+       struct sk_buff *msg;
+       int err = -ENOBUFS;
+       struct net *peer;
+
+       err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+                         rtnl_net_policy);
+       if (err < 0)
+               return err;
+       if (tb[NETNSA_PID])
+               peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
+       else if (tb[NETNSA_FD])
+               peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
+       else
+               return -EINVAL;
+
+       if (IS_ERR(peer))
+               return PTR_ERR(peer);
+
+       msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
+       if (!msg) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
+                           RTM_GETNSID, net, peer);
+       if (err < 0)
+               goto err_out;
+
+       err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
+       goto out;
+
+err_out:
+       nlmsg_free(msg);
+out:
+       put_net(peer);
+       return err;
+}
+
 static int __init net_ns_init(void)
 {
        struct net_generic *ng;
@@ -435,6 +645,9 @@ static int __init net_ns_init(void)
 
        register_pernet_subsys(&net_ns_ops);
 
+       rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, NULL, NULL);
+
        return 0;
 }
 
index e0ad5d16c9c56947163d81201af07e26d1d3017c..c126a878c47c95587cd4713ee500ec0e9577af39 100644 (file)
@@ -77,7 +77,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
 
        features = netif_skb_features(skb);
 
-       if (vlan_tx_tag_present(skb) &&
+       if (skb_vlan_tag_present(skb) &&
            !vlan_hw_offload_capable(features, skb->vlan_proto)) {
                skb = __vlan_hwaccel_push_inside(skb);
                if (unlikely(!skb)) {
index 9cf6fe9ddc0c99e189916dee672d16e6c4efe19a..5dad4f782f03f8556e0ed184158657f902cb119c 100644 (file)
@@ -50,6 +50,7 @@
 #include <net/arp.h>
 #include <net/route.h>
 #include <net/udp.h>
+#include <net/tcp.h>
 #include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <net/fib_rules.h>
@@ -669,9 +670,19 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
 
        for (i = 0; i < RTAX_MAX; i++) {
                if (metrics[i]) {
+                       if (i == RTAX_CC_ALGO - 1) {
+                               char tmp[TCP_CA_NAME_MAX], *name;
+
+                               name = tcp_ca_get_name_by_key(metrics[i], tmp);
+                               if (!name)
+                                       continue;
+                               if (nla_put_string(skb, i + 1, name))
+                                       goto nla_put_failure;
+                       } else {
+                               if (nla_put_u32(skb, i + 1, metrics[i]))
+                                       goto nla_put_failure;
+                       }
                        valid++;
-                       if (nla_put_u32(skb, i+1, metrics[i]))
-                               goto nla_put_failure;
                }
        }
 
@@ -864,6 +875,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + nla_total_size(1) /* IFLA_OPERSTATE */
               + nla_total_size(1) /* IFLA_LINKMODE */
               + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
+              + nla_total_size(4) /* IFLA_LINK_NETNSID */
               + nla_total_size(ext_filter_mask
                                & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1158,6 +1170,18 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        goto nla_put_failure;
        }
 
+       if (dev->rtnl_link_ops &&
+           dev->rtnl_link_ops->get_link_net) {
+               struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
+
+               if (!net_eq(dev_net(dev), link_net)) {
+                       int id = peernet2id(dev_net(dev), link_net);
+
+                       if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
+                               goto nla_put_failure;
+               }
+       }
+
        if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
                goto nla_put_failure;
 
@@ -1188,7 +1212,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 
        nla_nest_end(skb, af_spec);
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -1223,6 +1248,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_PHYS_PORT_ID]     = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
        [IFLA_CARRIER_CHANGES]  = { .type = NLA_U32 },  /* ignored */
        [IFLA_PHYS_SWITCH_ID]   = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
+       [IFLA_LINK_NETNSID]     = { .type = NLA_S32 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1315,7 +1341,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                         */
                        WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
 
-                       if (err <= 0)
+                       if (err < 0)
                                goto out;
 
                        nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1996,7 +2022,7 @@ replay:
                struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0];
                struct nlattr **data = NULL;
                struct nlattr **slave_data = NULL;
-               struct net *dest_net;
+               struct net *dest_net, *link_net = NULL;
 
                if (ops) {
                        if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
@@ -2102,7 +2128,18 @@ replay:
                if (IS_ERR(dest_net))
                        return PTR_ERR(dest_net);
 
-               dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
+               if (tb[IFLA_LINK_NETNSID]) {
+                       int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
+
+                       link_net = get_net_ns_by_id(dest_net, id);
+                       if (!link_net) {
+                               err =  -EINVAL;
+                               goto out;
+                       }
+               }
+
+               dev = rtnl_create_link(link_net ? : dest_net, ifname,
+                                      name_assign_type, ops, tb);
                if (IS_ERR(dev)) {
                        err = PTR_ERR(dev);
                        goto out;
@@ -2111,7 +2148,7 @@ replay:
                dev->ifindex = ifm->ifi_index;
 
                if (ops->newlink) {
-                       err = ops->newlink(net, dev, tb, data);
+                       err = ops->newlink(link_net ? : net, dev, tb, data);
                        /* Drivers should call free_netdev() in ->destructor
                         * and unregister it on failure after registration
                         * so that device could be finally freed in rtnl_unlock.
@@ -2130,9 +2167,19 @@ replay:
                        }
                }
                err = rtnl_configure_link(dev, ifm);
-               if (err < 0)
+               if (err < 0) {
                        unregister_netdevice(dev);
+                       goto out;
+               }
+
+               if (link_net) {
+                       err = dev_change_net_namespace(dev, dest_net, ifname);
+                       if (err < 0)
+                               unregister_netdevice(dev);
+               }
 out:
+               if (link_net)
+                       put_net(link_net);
                put_net(dest_net);
                return err;
        }
@@ -2315,7 +2362,8 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
        if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -2698,10 +2746,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                                         idx);
                }
 
-               idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
                if (dev->netdev_ops->ndo_fdb_dump)
-                       idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, bdev, dev,
+                       idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
                                                            idx);
+               else
+                       idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
 
                cops = NULL;
        }
@@ -2797,7 +2846,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 
        nla_nest_end(skb, protinfo);
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
@@ -2868,39 +2918,35 @@ static inline size_t bridge_nlmsg_size(void)
                + nla_total_size(sizeof(u16));  /* IFLA_BRIDGE_MODE */
 }
 
-static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
+static int rtnl_bridge_notify(struct net_device *dev)
 {
        struct net *net = dev_net(dev);
-       struct net_device *br_dev = netdev_master_upper_dev_get(dev);
        struct sk_buff *skb;
        int err = -EOPNOTSUPP;
 
+       if (!dev->netdev_ops->ndo_bridge_getlink)
+               return 0;
+
        skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
        if (!skb) {
                err = -ENOMEM;
                goto errout;
        }
 
-       if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
-           br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
-               err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
-               if (err < 0)
-                       goto errout;
-       }
+       err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
+       if (err < 0)
+               goto errout;
 
-       if ((flags & BRIDGE_FLAGS_SELF) &&
-           dev->netdev_ops->ndo_bridge_getlink) {
-               err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
-               if (err < 0)
-                       goto errout;
-       }
+       if (!skb->len)
+               goto errout;
 
        rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
        return 0;
 errout:
        WARN_ON(err == -EMSGSIZE);
        kfree_skb(skb);
-       rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+       if (err)
+               rtnl_set_sk_err(net, RTNLGRP_LINK, err);
        return err;
 }
 
@@ -2911,7 +2957,7 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct net_device *dev;
        struct nlattr *br_spec, *attr = NULL;
        int rem, err = -EOPNOTSUPP;
-       u16 oflags, flags = 0;
+       u16 flags = 0;
        bool have_flags = false;
 
        if (nlmsg_len(nlh) < sizeof(*ifm))
@@ -2941,8 +2987,6 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
        }
 
-       oflags = flags;
-
        if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
                struct net_device *br_dev = netdev_master_upper_dev_get(dev);
 
@@ -2951,7 +2995,7 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
                        goto out;
                }
 
-               err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
+               err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
                if (err)
                        goto out;
 
@@ -2962,17 +3006,20 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (!dev->netdev_ops->ndo_bridge_setlink)
                        err = -EOPNOTSUPP;
                else
-                       err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
-
-               if (!err)
+                       err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
+                                                                 flags);
+               if (!err) {
                        flags &= ~BRIDGE_FLAGS_SELF;
+
+                       /* Generate event to notify upper layer of bridge
+                        * change
+                        */
+                       err = rtnl_bridge_notify(dev);
+               }
        }
 
        if (have_flags)
                memcpy(nla_data(attr), &flags, sizeof(flags));
-       /* Generate event to notify upper layer of bridge change */
-       if (!err)
-               err = rtnl_bridge_notify(dev, oflags);
 out:
        return err;
 }
@@ -2984,7 +3031,7 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct net_device *dev;
        struct nlattr *br_spec, *attr = NULL;
        int rem, err = -EOPNOTSUPP;
-       u16 oflags, flags = 0;
+       u16 flags = 0;
        bool have_flags = false;
 
        if (nlmsg_len(nlh) < sizeof(*ifm))
@@ -3014,8 +3061,6 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
        }
 
-       oflags = flags;
-
        if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
                struct net_device *br_dev = netdev_master_upper_dev_get(dev);
 
@@ -3024,7 +3069,7 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
                        goto out;
                }
 
-               err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
+               err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
                if (err)
                        goto out;
 
@@ -3035,17 +3080,21 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (!dev->netdev_ops->ndo_bridge_dellink)
                        err = -EOPNOTSUPP;
                else
-                       err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
+                       err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
+                                                                 flags);
 
-               if (!err)
+               if (!err) {
                        flags &= ~BRIDGE_FLAGS_SELF;
+
+                       /* Generate event to notify upper layer of bridge
+                        * change
+                        */
+                       err = rtnl_bridge_notify(dev);
+               }
        }
 
        if (have_flags)
                memcpy(nla_data(attr), &flags, sizeof(flags));
-       /* Generate event to notify upper layer of bridge change */
-       if (!err)
-               err = rtnl_bridge_notify(dev, oflags);
 out:
        return err;
 }
@@ -3135,6 +3184,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
        case NETDEV_UNREGISTER_FINAL:
        case NETDEV_RELEASE:
        case NETDEV_JOIN:
+       case NETDEV_BONDING_INFO:
                break;
        default:
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
index 5a2a2e887a12ee3d813fa81b0b26ac8c1f0c24d8..88c613eab142962dc44f2075378fce0b94349e8e 100644 (file)
@@ -74,6 +74,8 @@
 #include <asm/uaccess.h>
 #include <trace/events/skb.h>
 #include <linux/highmem.h>
+#include <linux/capability.h>
+#include <linux/user_namespace.h>
 
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
@@ -823,6 +825,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #ifdef CONFIG_NET_RX_BUSY_POLL
        CHECK_SKB_FIELD(napi_id);
 #endif
+#ifdef CONFIG_XPS
+       CHECK_SKB_FIELD(sender_cpu);
+#endif
 #ifdef CONFIG_NET_SCHED
        CHECK_SKB_FIELD(tc_index);
 #ifdef CONFIG_NET_CLS_ACT
@@ -3690,11 +3695,28 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
                kfree_skb(skb);
 }
 
+static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
+{
+       bool ret;
+
+       if (likely(sysctl_tstamp_allow_data || tsonly))
+               return true;
+
+       read_lock_bh(&sk->sk_callback_lock);
+       ret = sk->sk_socket && sk->sk_socket->file &&
+             file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
+       read_unlock_bh(&sk->sk_callback_lock);
+       return ret;
+}
+
 void skb_complete_tx_timestamp(struct sk_buff *skb,
                               struct skb_shared_hwtstamps *hwtstamps)
 {
        struct sock *sk = skb->sk;
 
+       if (!skb_may_tx_timestamp(sk, false))
+               return;
+
        /* take a reference to prevent skb_orphan() from freeing the socket */
        sock_hold(sk);
 
@@ -3710,19 +3732,28 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
                     struct sock *sk, int tstype)
 {
        struct sk_buff *skb;
+       bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
 
-       if (!sk)
+       if (!sk || !skb_may_tx_timestamp(sk, tsonly))
                return;
 
-       if (hwtstamps)
-               *skb_hwtstamps(orig_skb) = *hwtstamps;
+       if (tsonly)
+               skb = alloc_skb(0, GFP_ATOMIC);
        else
-               orig_skb->tstamp = ktime_get_real();
-
-       skb = skb_clone(orig_skb, GFP_ATOMIC);
+               skb = skb_clone(orig_skb, GFP_ATOMIC);
        if (!skb)
                return;
 
+       if (tsonly) {
+               skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
+               skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
+       }
+
+       if (hwtstamps)
+               *skb_hwtstamps(skb) = *hwtstamps;
+       else
+               skb->tstamp = ktime_get_real();
+
        __skb_complete_tx_timestamp(skb, sk, tstype);
 }
 EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
@@ -4141,6 +4172,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb->mark = 0;
+       skb->sender_cpu = 0;
        skb_init_secmark(skb);
        secpath_reset(skb);
        nf_reset(skb);
@@ -4197,7 +4229,7 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
        struct vlan_hdr *vhdr;
        u16 vlan_tci;
 
-       if (unlikely(vlan_tx_tag_present(skb))) {
+       if (unlikely(skb_vlan_tag_present(skb))) {
                /* vlan_tci is already set-up so leave this for another time */
                return skb;
        }
@@ -4283,7 +4315,7 @@ int skb_vlan_pop(struct sk_buff *skb)
        __be16 vlan_proto;
        int err;
 
-       if (likely(vlan_tx_tag_present(skb))) {
+       if (likely(skb_vlan_tag_present(skb))) {
                skb->vlan_tci = 0;
        } else {
                if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
@@ -4313,7 +4345,7 @@ EXPORT_SYMBOL(skb_vlan_pop);
 
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 {
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                unsigned int offset = skb->data - skb_mac_header(skb);
                int err;
 
@@ -4323,7 +4355,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
                 */
                __skb_push(skb, offset);
                err = __vlan_insert_tag(skb, skb->vlan_proto,
-                                       vlan_tx_tag_get(skb));
+                                       skb_vlan_tag_get(skb));
                if (err)
                        return err;
                skb->protocol = skb->vlan_proto;
index 1c7a33db1314f3e2a7ded154b9ea498023d2d2e4..93c8b20c91e496648f7f2e5c769c062ca2dd679d 100644 (file)
@@ -325,6 +325,8 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 EXPORT_SYMBOL(sysctl_optmem_max);
 
+int sysctl_tstamp_allow_data __read_mostly = 1;
+
 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
 EXPORT_SYMBOL_GPL(memalloc_socks);
 
@@ -840,6 +842,7 @@ set_rcvbuf:
                        ret = -EINVAL;
                        break;
                }
+
                if (val & SOF_TIMESTAMPING_OPT_ID &&
                    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
                        if (sk->sk_protocol == IPPROTO_TCP) {
index 31baba2a71ce15e49450f69dae81e7d3be1ff3f2..eaa51ddf2368747c21399bf44f5a1993c0a0e39b 100644 (file)
@@ -52,7 +52,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
 
        if (write) {
                if (size) {
-                       if (size > 1<<30) {
+                       if (size > 1<<29) {
                                /* Enforce limit to prevent overflow */
                                mutex_unlock(&sock_flow_mutex);
                                return -EINVAL;
@@ -65,7 +65,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
                                        mutex_unlock(&sock_flow_mutex);
                                        return -ENOMEM;
                                }
-
+                               rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
                                sock_table->mask = size - 1;
                        } else
                                sock_table = orig_sock_table;
@@ -321,6 +321,15 @@ static struct ctl_table net_core_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "tstamp_allow_data",
+               .data           = &sysctl_tstamp_allow_data,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one
+       },
 #ifdef CONFIG_RPS
        {
                .procname       = "rps_sock_flow_entries",
index 4400da7739dafb3c3d42e52829086b9d3e418505..b2c26b081134a0c5b3697bfbad0148a029a5b95e 100644 (file)
@@ -702,7 +702,8 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
             nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
             nla_put_u32(skb, IFA_FLAGS, ifa_flags))
                goto nla_put_failure;
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
index d332aefb0846f86a11d924e3e1e7ad23e279dda2..df48034378889eac83c6b262a76a32028202efb4 100644 (file)
@@ -298,7 +298,8 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct nlattr *att
                        int type = nla_type(attr);
 
                        if (type) {
-                               if (type > RTAX_MAX || nla_len(attr) < 4)
+                               if (type > RTAX_MAX || type == RTAX_CC_ALGO ||
+                                   nla_len(attr) < 4)
                                        goto err_inval;
 
                                fi->fib_metrics[type-1] = nla_get_u32(attr);
index daccc4a36d80ea9df6e986d53b83fc95b807b8b8..1d7c1256e8458d35e4a9f9daa392aba37672e1bf 100644 (file)
@@ -1616,7 +1616,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
            nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
                goto errout;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 errout:
        nlmsg_cancel(skb, nlh);
@@ -1709,9 +1710,6 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
                rt->rt_flags |= RTCF_NOTIFY;
 
        err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
-
-       if (err == 0)
-               goto out_free;
        if (err < 0) {
                err = -EMSGSIZE;
                goto out_free;
@@ -1762,7 +1760,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        skb_dst_set(skb, dst_clone(&rt->dst));
                        if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, RTM_NEWROUTE,
-                                       1, NLM_F_MULTI) <= 0) {
+                                       1, NLM_F_MULTI) < 0) {
                                skb_dst_drop(skb);
                                rcu_read_unlock_bh();
                                goto done;
index 86e3807052e9ab4feb56fcf88a06391c18f1472e..1540b506e3e0b4f1fa094d2c928d22d20c4dbf3a 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/route.h> /* RTF_xxx */
 #include <net/neighbour.h>
 #include <net/netlink.h>
+#include <net/tcp.h>
 #include <net/dst.h>
 #include <net/flow.h>
 #include <net/fib_rules.h>
@@ -273,7 +274,8 @@ static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
        size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
                         + nla_total_size(4) /* RTA_TABLE */
                         + nla_total_size(2) /* RTA_DST */
-                        + nla_total_size(4); /* RTA_PRIORITY */
+                        + nla_total_size(4) /* RTA_PRIORITY */
+                        + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
 
        /* space for nested metrics */
        payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
@@ -365,7 +367,8 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                nla_nest_end(skb, mp_head);
        }
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 errout:
        nlmsg_cancel(skb, nlh);
index 515569ffde8a16af9eea82cc85ded4f8d5ce59cc..589aafd01fc5256a0fac138bac3240ab191b507e 100644 (file)
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
        snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
                        ds->index, ds->pd->sw_addr);
        ds->slave_mii_bus->parent = ds->master_dev;
+       ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
 }
 
 
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
new file mode 100644 (file)
index 0000000..e50f69d
--- /dev/null
@@ -0,0 +1,72 @@
+#ifndef __IEEE802154_6LOWPAN_I_H__
+#define __IEEE802154_6LOWPAN_I_H__
+
+#include <linux/list.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/inet_frag.h>
+
+struct lowpan_create_arg {
+       u16 tag;
+       u16 d_size;
+       const struct ieee802154_addr *src;
+       const struct ieee802154_addr *dst;
+};
+
+/* Equivalent of ipv4 struct ip
+ */
+struct lowpan_frag_queue {
+       struct inet_frag_queue  q;
+
+       u16                     tag;
+       u16                     d_size;
+       struct ieee802154_addr  saddr;
+       struct ieee802154_addr  daddr;
+};
+
+static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
+{
+       switch (a->mode) {
+       case IEEE802154_ADDR_LONG:
+               return (((__force u64)a->extended_addr) >> 32) ^
+                       (((__force u64)a->extended_addr) & 0xffffffff);
+       case IEEE802154_ADDR_SHORT:
+               return (__force u32)(a->short_addr);
+       default:
+               return 0;
+       }
+}
+
+struct lowpan_dev_record {
+       struct net_device *ldev;
+       struct list_head list;
+};
+
+/* private device info */
+struct lowpan_dev_info {
+       struct net_device       *real_dev; /* real WPAN device ptr */
+       struct mutex            dev_list_mtx; /* mutex for list ops */
+       u16                     fragment_tag;
+};
+
+static inline struct
+lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
+{
+       return netdev_priv(dev);
+}
+
+extern struct list_head lowpan_devices;
+
+int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
+void lowpan_net_frag_exit(void);
+int lowpan_net_frag_init(void);
+
+void lowpan_rx_init(void);
+void lowpan_rx_exit(void);
+
+int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+                        unsigned short type, const void *_daddr,
+                        const void *_saddr, unsigned int len);
+netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev);
+
+#endif /* __IEEE802154_6LOWPAN_I_H__ */
diff --git a/net/ieee802154/6lowpan/Kconfig b/net/ieee802154/6lowpan/Kconfig
new file mode 100644 (file)
index 0000000..d24f985
--- /dev/null
@@ -0,0 +1,5 @@
+config IEEE802154_6LOWPAN
+       tristate "6lowpan support over IEEE 802.15.4"
+       depends on 6LOWPAN
+       ---help---
+         IPv6 compression over IEEE 802.15.4.
diff --git a/net/ieee802154/6lowpan/Makefile b/net/ieee802154/6lowpan/Makefile
new file mode 100644 (file)
index 0000000..6bfb270
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IEEE802154_6LOWPAN) += ieee802154_6lowpan.o
+
+ieee802154_6lowpan-y := core.o rx.o reassembly.o tx.o
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
new file mode 100644 (file)
index 0000000..055fbb7
--- /dev/null
@@ -0,0 +1,304 @@
+/* Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ieee802154.h>
+
+#include <net/ipv6.h>
+
+#include "6lowpan_i.h"
+
+LIST_HEAD(lowpan_devices);
+static int lowpan_open_count;
+
+static __le16 lowpan_get_pan_id(const struct net_device *dev)
+{
+       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
+       return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
+}
+
+static __le16 lowpan_get_short_addr(const struct net_device *dev)
+{
+       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
+       return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
+}
+
+static u8 lowpan_get_dsn(const struct net_device *dev)
+{
+       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
+       return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
+}
+
+static struct header_ops lowpan_header_ops = {
+       .create = lowpan_header_create,
+};
+
+static struct lock_class_key lowpan_tx_busylock;
+static struct lock_class_key lowpan_netdev_xmit_lock_key;
+
+static void lowpan_set_lockdep_class_one(struct net_device *dev,
+                                        struct netdev_queue *txq,
+                                        void *_unused)
+{
+       lockdep_set_class(&txq->_xmit_lock,
+                         &lowpan_netdev_xmit_lock_key);
+}
+
+static int lowpan_dev_init(struct net_device *dev)
+{
+       netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
+       dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+       return 0;
+}
+
+static const struct net_device_ops lowpan_netdev_ops = {
+       .ndo_init               = lowpan_dev_init,
+       .ndo_start_xmit         = lowpan_xmit,
+};
+
+static struct ieee802154_mlme_ops lowpan_mlme = {
+       .get_pan_id = lowpan_get_pan_id,
+       .get_short_addr = lowpan_get_short_addr,
+       .get_dsn = lowpan_get_dsn,
+};
+
+static void lowpan_setup(struct net_device *dev)
+{
+       dev->addr_len           = IEEE802154_ADDR_LEN;
+       memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+       dev->type               = ARPHRD_IEEE802154;
+       /* Frame Control + Sequence Number + Address fields + Security Header */
+       dev->hard_header_len    = 2 + 1 + 20 + 14;
+       dev->needed_tailroom    = 2; /* FCS */
+       dev->mtu                = IPV6_MIN_MTU;
+       dev->tx_queue_len       = 0;
+       dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
+       dev->watchdog_timeo     = 0;
+
+       dev->netdev_ops         = &lowpan_netdev_ops;
+       dev->header_ops         = &lowpan_header_ops;
+       dev->ml_priv            = &lowpan_mlme;
+       dev->destructor         = free_netdev;
+}
+
+static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
+                       return -EINVAL;
+       }
+       return 0;
+}
+
+static int lowpan_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net_device *real_dev;
+       struct lowpan_dev_record *entry;
+       int ret;
+
+       ASSERT_RTNL();
+
+       pr_debug("adding new link\n");
+
+       if (!tb[IFLA_LINK])
+               return -EINVAL;
+       /* find and hold real wpan device */
+       real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+       if (!real_dev)
+               return -ENODEV;
+       if (real_dev->type != ARPHRD_IEEE802154) {
+               dev_put(real_dev);
+               return -EINVAL;
+       }
+
+       lowpan_dev_info(dev)->real_dev = real_dev;
+       mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               dev_put(real_dev);
+               lowpan_dev_info(dev)->real_dev = NULL;
+               return -ENOMEM;
+       }
+
+       entry->ldev = dev;
+
+       /* Set the lowpan hardware address to the wpan hardware address. */
+       memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+
+       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+       INIT_LIST_HEAD(&entry->list);
+       list_add_tail(&entry->list, &lowpan_devices);
+       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+       ret = register_netdevice(dev);
+       if (ret >= 0) {
+               if (!lowpan_open_count)
+                       lowpan_rx_init();
+               lowpan_open_count++;
+       }
+
+       return ret;
+}
+
+static void lowpan_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
+       struct net_device *real_dev = lowpan_dev->real_dev;
+       struct lowpan_dev_record *entry, *tmp;
+
+       ASSERT_RTNL();
+
+       lowpan_open_count--;
+       if (!lowpan_open_count)
+               lowpan_rx_exit();
+
+       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+       list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+               if (entry->ldev == dev) {
+                       list_del(&entry->list);
+                       kfree(entry);
+               }
+       }
+       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+       mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
+
+       unregister_netdevice_queue(dev, head);
+
+       dev_put(real_dev);
+}
+
+static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
+       .kind           = "lowpan",
+       .priv_size      = sizeof(struct lowpan_dev_info),
+       .setup          = lowpan_setup,
+       .newlink        = lowpan_newlink,
+       .dellink        = lowpan_dellink,
+       .validate       = lowpan_validate,
+};
+
+static inline int __init lowpan_netlink_init(void)
+{
+       return rtnl_link_register(&lowpan_link_ops);
+}
+
+static inline void lowpan_netlink_fini(void)
+{
+       rtnl_link_unregister(&lowpan_link_ops);
+}
+
+static int lowpan_device_event(struct notifier_block *unused,
+                              unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       LIST_HEAD(del_list);
+       struct lowpan_dev_record *entry, *tmp;
+
+       if (dev->type != ARPHRD_IEEE802154)
+               goto out;
+
+       if (event == NETDEV_UNREGISTER) {
+               list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+                       if (lowpan_dev_info(entry->ldev)->real_dev == dev)
+                               lowpan_dellink(entry->ldev, &del_list);
+               }
+
+               unregister_netdevice_many(&del_list);
+       }
+
+out:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block lowpan_dev_notifier = {
+       .notifier_call = lowpan_device_event,
+};
+
+static int __init lowpan_init_module(void)
+{
+       int err = 0;
+
+       err = lowpan_net_frag_init();
+       if (err < 0)
+               goto out;
+
+       err = lowpan_netlink_init();
+       if (err < 0)
+               goto out_frag;
+
+       err = register_netdevice_notifier(&lowpan_dev_notifier);
+       if (err < 0)
+               goto out_pack;
+
+       return 0;
+
+out_pack:
+       lowpan_netlink_fini();
+out_frag:
+       lowpan_net_frag_exit();
+out:
+       return err;
+}
+
+static void __exit lowpan_cleanup_module(void)
+{
+       lowpan_netlink_fini();
+
+       lowpan_net_frag_exit();
+
+       unregister_netdevice_notifier(&lowpan_dev_notifier);
+}
+
+module_init(lowpan_init_module);
+module_exit(lowpan_cleanup_module);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("lowpan");
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
new file mode 100644 (file)
index 0000000..f46e4d1
--- /dev/null
@@ -0,0 +1,585 @@
+/*     6LoWPAN fragment reassembly
+ *
+ *
+ *     Authors:
+ *     Alexander Aring         <aar@pengutronix.de>
+ *
+ *     Based on: net/ipv6/reassembly.c
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "6LoWPAN: " fmt
+
+#include <linux/net.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/6lowpan.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+#include "6lowpan_i.h"
+
+static const char lowpan_frags_cache_name[] = "lowpan-frags";
+
+struct lowpan_frag_info {
+       u16 d_tag;
+       u16 d_size;
+       u8 d_offset;
+};
+
+static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
+{
+       return (struct lowpan_frag_info *)skb->cb;
+}
+
+static struct inet_frags lowpan_frags;
+
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
+                            struct sk_buff *prev, struct net_device *dev);
+
+static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
+                                    const struct ieee802154_addr *saddr,
+                                    const struct ieee802154_addr *daddr)
+{
+       net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
+       return jhash_3words(ieee802154_addr_hash(saddr),
+                           ieee802154_addr_hash(daddr),
+                           (__force u32)(tag + (d_size << 16)),
+                           lowpan_frags.rnd);
+}
+
+static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
+{
+       const struct lowpan_frag_queue *fq;
+
+       fq = container_of(q, struct lowpan_frag_queue, q);
+       return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
+}
+
+static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
+{
+       const struct lowpan_frag_queue *fq;
+       const struct lowpan_create_arg *arg = a;
+
+       fq = container_of(q, struct lowpan_frag_queue, q);
+       return  fq->tag == arg->tag && fq->d_size == arg->d_size &&
+               ieee802154_addr_equal(&fq->saddr, arg->src) &&
+               ieee802154_addr_equal(&fq->daddr, arg->dst);
+}
+
+static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
+{
+       const struct lowpan_create_arg *arg = a;
+       struct lowpan_frag_queue *fq;
+
+       fq = container_of(q, struct lowpan_frag_queue, q);
+
+       fq->tag = arg->tag;
+       fq->d_size = arg->d_size;
+       fq->saddr = *arg->src;
+       fq->daddr = *arg->dst;
+}
+
+static void lowpan_frag_expire(unsigned long data)
+{
+       struct frag_queue *fq;
+       struct net *net;
+
+       fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+       net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
+
+       spin_lock(&fq->q.lock);
+
+       if (fq->q.flags & INET_FRAG_COMPLETE)
+               goto out;
+
+       inet_frag_kill(&fq->q, &lowpan_frags);
+out:
+       spin_unlock(&fq->q.lock);
+       inet_frag_put(&fq->q, &lowpan_frags);
+}
+
+static inline struct lowpan_frag_queue *
+fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
+       const struct ieee802154_addr *src,
+       const struct ieee802154_addr *dst)
+{
+       struct inet_frag_queue *q;
+       struct lowpan_create_arg arg;
+       unsigned int hash;
+       struct netns_ieee802154_lowpan *ieee802154_lowpan =
+               net_ieee802154_lowpan(net);
+
+       arg.tag = frag_info->d_tag;
+       arg.d_size = frag_info->d_size;
+       arg.src = src;
+       arg.dst = dst;
+
+       hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
+
+       q = inet_frag_find(&ieee802154_lowpan->frags,
+                          &lowpan_frags, &arg, hash);
+       if (IS_ERR_OR_NULL(q)) {
+               inet_frag_maybe_warn_overflow(q, pr_fmt());
+               return NULL;
+       }
+       return container_of(q, struct lowpan_frag_queue, q);
+}
+
+static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
+                            struct sk_buff *skb, const u8 frag_type)
+{
+       struct sk_buff *prev, *next;
+       struct net_device *dev;
+       int end, offset;
+
+       if (fq->q.flags & INET_FRAG_COMPLETE)
+               goto err;
+
+       offset = lowpan_cb(skb)->d_offset << 3;
+       end = lowpan_cb(skb)->d_size;
+
+       /* Is this the final fragment? */
+       if (offset + skb->len == end) {
+               /* If we already have some bits beyond end
+                * or have different end, the segment is corrupted.
+                */
+               if (end < fq->q.len ||
+                   ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
+                       goto err;
+               fq->q.flags |= INET_FRAG_LAST_IN;
+               fq->q.len = end;
+       } else {
+               if (end > fq->q.len) {
+                       /* Some bits beyond end -> corruption. */
+                       if (fq->q.flags & INET_FRAG_LAST_IN)
+                               goto err;
+                       fq->q.len = end;
+               }
+       }
+
+       /* Find out which fragments are in front and at the back of us
+        * in the chain of fragments so far.  We must know where to put
+        * this fragment, right?
+        */
+       prev = fq->q.fragments_tail;
+       if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
+               next = NULL;
+               goto found;
+       }
+       prev = NULL;
+       for (next = fq->q.fragments; next != NULL; next = next->next) {
+               if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
+                       break;  /* bingo! */
+               prev = next;
+       }
+
+found:
+       /* Insert this fragment in the chain of fragments. */
+       skb->next = next;
+       if (!next)
+               fq->q.fragments_tail = skb;
+       if (prev)
+               prev->next = skb;
+       else
+               fq->q.fragments = skb;
+
+       dev = skb->dev;
+       if (dev)
+               skb->dev = NULL;
+
+       fq->q.stamp = skb->tstamp;
+       if (frag_type == LOWPAN_DISPATCH_FRAG1) {
+               /* Calculate uncomp. 6lowpan header to estimate full size */
+               fq->q.meat += lowpan_uncompress_size(skb, NULL);
+               fq->q.flags |= INET_FRAG_FIRST_IN;
+       } else {
+               fq->q.meat += skb->len;
+       }
+       add_frag_mem_limit(&fq->q, skb->truesize);
+
+       if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+           fq->q.meat == fq->q.len) {
+               int res;
+               unsigned long orefdst = skb->_skb_refdst;
+
+               skb->_skb_refdst = 0UL;
+               res = lowpan_frag_reasm(fq, prev, dev);
+               skb->_skb_refdst = orefdst;
+               return res;
+       }
+
+       return -1;
+err:
+       kfree_skb(skb);
+       return -1;
+}
+
+/*     Check if this packet is complete.
+ *     Returns NULL on failure by any reason, and pointer
+ *     to current nexthdr field in reassembled frame.
+ *
+ *     It is called with locked fq, and caller must check that
+ *     queue is eligible for reassembly i.e. it is not COMPLETE,
+ *     the last and the first frames arrived and all the bits are here.
+ */
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
+                            struct net_device *dev)
+{
+       struct sk_buff *fp, *head = fq->q.fragments;
+       int sum_truesize;
+
+       inet_frag_kill(&fq->q, &lowpan_frags);
+
+       /* Make the one we just received the head. */
+       if (prev) {
+               head = prev->next;
+               fp = skb_clone(head, GFP_ATOMIC);
+
+               if (!fp)
+                       goto out_oom;
+
+               fp->next = head->next;
+               if (!fp->next)
+                       fq->q.fragments_tail = fp;
+               prev->next = fp;
+
+               skb_morph(head, fq->q.fragments);
+               head->next = fq->q.fragments->next;
+
+               consume_skb(fq->q.fragments);
+               fq->q.fragments = head;
+       }
+
+       /* Head of list must not be cloned. */
+       if (skb_unclone(head, GFP_ATOMIC))
+               goto out_oom;
+
+       /* If the first fragment is fragmented itself, we split
+        * it to two chunks: the first with data and paged part
+        * and the second, holding only fragments.
+        */
+       if (skb_has_frag_list(head)) {
+               struct sk_buff *clone;
+               int i, plen = 0;
+
+               clone = alloc_skb(0, GFP_ATOMIC);
+               if (!clone)
+                       goto out_oom;
+               clone->next = head->next;
+               head->next = clone;
+               skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+               skb_frag_list_init(head);
+               for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+                       plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+               clone->len = head->data_len - plen;
+               clone->data_len = clone->len;
+               head->data_len -= clone->len;
+               head->len -= clone->len;
+               add_frag_mem_limit(&fq->q, clone->truesize);
+       }
+
+       WARN_ON(head == NULL);
+
+       sum_truesize = head->truesize;
+       for (fp = head->next; fp;) {
+               bool headstolen;
+               int delta;
+               struct sk_buff *next = fp->next;
+
+               sum_truesize += fp->truesize;
+               if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+                       kfree_skb_partial(fp, headstolen);
+               } else {
+                       if (!skb_shinfo(head)->frag_list)
+                               skb_shinfo(head)->frag_list = fp;
+                       head->data_len += fp->len;
+                       head->len += fp->len;
+                       head->truesize += fp->truesize;
+               }
+               fp = next;
+       }
+       sub_frag_mem_limit(&fq->q, sum_truesize);
+
+       head->next = NULL;
+       head->dev = dev;
+       head->tstamp = fq->q.stamp;
+
+       fq->q.fragments = NULL;
+       fq->q.fragments_tail = NULL;
+
+       return 1;
+out_oom:
+       net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
+       return -1;
+}
+
+static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
+                               struct lowpan_frag_info *frag_info)
+{
+       bool fail;
+       u8 pattern = 0, low = 0;
+       __be16 d_tag = 0;
+
+       fail = lowpan_fetch_skb(skb, &pattern, 1);
+       fail |= lowpan_fetch_skb(skb, &low, 1);
+       frag_info->d_size = (pattern & 7) << 8 | low;
+       fail |= lowpan_fetch_skb(skb, &d_tag, 2);
+       frag_info->d_tag = ntohs(d_tag);
+
+       if (frag_type == LOWPAN_DISPATCH_FRAGN) {
+               fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
+       } else {
+               skb_reset_network_header(skb);
+               frag_info->d_offset = 0;
+       }
+
+       if (unlikely(fail))
+               return -EIO;
+
+       return 0;
+}
+
+int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
+{
+       struct lowpan_frag_queue *fq;
+       struct net *net = dev_net(skb->dev);
+       struct lowpan_frag_info *frag_info = lowpan_cb(skb);
+       struct ieee802154_addr source, dest;
+       int err;
+
+       source = mac_cb(skb)->source;
+       dest = mac_cb(skb)->dest;
+
+       err = lowpan_get_frag_info(skb, frag_type, frag_info);
+       if (err < 0)
+               goto err;
+
+       if (frag_info->d_size > IPV6_MIN_MTU) {
+               net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
+               goto err;
+       }
+
+       fq = fq_find(net, frag_info, &source, &dest);
+       if (fq != NULL) {
+               int ret;
+
+               spin_lock(&fq->q.lock);
+               ret = lowpan_frag_queue(fq, skb, frag_type);
+               spin_unlock(&fq->q.lock);
+
+               inet_frag_put(&fq->q, &lowpan_frags);
+               return ret;
+       }
+
+err:
+       kfree_skb(skb);
+       return -1;
+}
+EXPORT_SYMBOL(lowpan_frag_rcv);
+
+#ifdef CONFIG_SYSCTL
+static int zero;
+
+static struct ctl_table lowpan_frags_ns_ctl_table[] = {
+       {
+               .procname       = "6lowpanfrag_high_thresh",
+               .data           = &init_net.ieee802154_lowpan.frags.high_thresh,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &init_net.ieee802154_lowpan.frags.low_thresh
+       },
+       {
+               .procname       = "6lowpanfrag_low_thresh",
+               .data           = &init_net.ieee802154_lowpan.frags.low_thresh,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &init_net.ieee802154_lowpan.frags.high_thresh
+       },
+       {
+               .procname       = "6lowpanfrag_time",
+               .data           = &init_net.ieee802154_lowpan.frags.timeout,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       { }
+};
+
+/* secret interval has been deprecated */
+static int lowpan_frags_secret_interval_unused;
+static struct ctl_table lowpan_frags_ctl_table[] = {
+       {
+               .procname       = "6lowpanfrag_secret_interval",
+               .data           = &lowpan_frags_secret_interval_unused,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       { }
+};
+
+static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
+{
+       struct ctl_table *table;
+       struct ctl_table_header *hdr;
+       struct netns_ieee802154_lowpan *ieee802154_lowpan =
+               net_ieee802154_lowpan(net);
+
+       table = lowpan_frags_ns_ctl_table;
+       if (!net_eq(net, &init_net)) {
+               table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
+                               GFP_KERNEL);
+               if (table == NULL)
+                       goto err_alloc;
+
+               table[0].data = &ieee802154_lowpan->frags.high_thresh;
+               table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
+               table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
+               table[1].data = &ieee802154_lowpan->frags.low_thresh;
+               table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
+               table[2].data = &ieee802154_lowpan->frags.timeout;
+
+               /* Don't export sysctls to unprivileged users */
+               if (net->user_ns != &init_user_ns)
+                       table[0].procname = NULL;
+       }
+
+       hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
+       if (hdr == NULL)
+               goto err_reg;
+
+       ieee802154_lowpan->sysctl.frags_hdr = hdr;
+       return 0;
+
+err_reg:
+       if (!net_eq(net, &init_net))
+               kfree(table);
+err_alloc:
+       return -ENOMEM;
+}
+
+static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
+{
+       struct ctl_table *table;
+       struct netns_ieee802154_lowpan *ieee802154_lowpan =
+               net_ieee802154_lowpan(net);
+
+       table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
+       if (!net_eq(net, &init_net))
+               kfree(table);
+}
+
+static struct ctl_table_header *lowpan_ctl_header;
+
+static int __init lowpan_frags_sysctl_register(void)
+{
+       lowpan_ctl_header = register_net_sysctl(&init_net,
+                                               "net/ieee802154/6lowpan",
+                                               lowpan_frags_ctl_table);
+       return lowpan_ctl_header == NULL ? -ENOMEM : 0;
+}
+
+static void lowpan_frags_sysctl_unregister(void)
+{
+       unregister_net_sysctl_table(lowpan_ctl_header);
+}
+#else
+static inline int lowpan_frags_ns_sysctl_register(struct net *net)
+{
+       return 0;
+}
+
+static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
+{
+}
+
+static inline int __init lowpan_frags_sysctl_register(void)
+{
+       return 0;
+}
+
+static inline void lowpan_frags_sysctl_unregister(void)
+{
+}
+#endif
+
+static int __net_init lowpan_frags_init_net(struct net *net)
+{
+       struct netns_ieee802154_lowpan *ieee802154_lowpan =
+               net_ieee802154_lowpan(net);
+
+       ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+       ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+       ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
+
+       inet_frags_init_net(&ieee802154_lowpan->frags);
+
+       return lowpan_frags_ns_sysctl_register(net);
+}
+
+static void __net_exit lowpan_frags_exit_net(struct net *net)
+{
+       struct netns_ieee802154_lowpan *ieee802154_lowpan =
+               net_ieee802154_lowpan(net);
+
+       lowpan_frags_ns_sysctl_unregister(net);
+       inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
+}
+
+static struct pernet_operations lowpan_frags_ops = {
+       .init = lowpan_frags_init_net,
+       .exit = lowpan_frags_exit_net,
+};
+
+int __init lowpan_net_frag_init(void)
+{
+       int ret;
+
+       ret = lowpan_frags_sysctl_register();
+       if (ret)
+               return ret;
+
+       ret = register_pernet_subsys(&lowpan_frags_ops);
+       if (ret)
+               goto err_pernet;
+
+       lowpan_frags.hashfn = lowpan_hashfn;
+       lowpan_frags.constructor = lowpan_frag_init;
+       lowpan_frags.destructor = NULL;
+       lowpan_frags.skb_free = NULL;
+       lowpan_frags.qsize = sizeof(struct frag_queue);
+       lowpan_frags.match = lowpan_frag_match;
+       lowpan_frags.frag_expire = lowpan_frag_expire;
+       lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
+       ret = inet_frags_init(&lowpan_frags);
+       if (ret)
+               goto err_pernet;
+
+       return ret;
+err_pernet:
+       lowpan_frags_sysctl_unregister();
+       return ret;
+}
+
+void lowpan_net_frag_exit(void)
+{
+       inet_frags_fini(&lowpan_frags);
+       lowpan_frags_sysctl_unregister();
+       unregister_pernet_subsys(&lowpan_frags_ops);
+}
diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c
new file mode 100644 (file)
index 0000000..4be1d28
--- /dev/null
@@ -0,0 +1,171 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/if_arp.h>
+
+#include <net/6lowpan.h>
+#include <net/ieee802154_netdev.h>
+
+#include "6lowpan_i.h"
+
+static int lowpan_give_skb_to_devices(struct sk_buff *skb,
+                                     struct net_device *dev)
+{
+       struct lowpan_dev_record *entry;
+       struct sk_buff *skb_cp;
+       int stat = NET_RX_SUCCESS;
+
+       skb->protocol = htons(ETH_P_IPV6);
+       skb->pkt_type = PACKET_HOST;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(entry, &lowpan_devices, list)
+               if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
+                       skb_cp = skb_copy(skb, GFP_ATOMIC);
+                       if (!skb_cp) {
+                               kfree_skb(skb);
+                               rcu_read_unlock();
+                               return NET_RX_DROP;
+                       }
+
+                       skb_cp->dev = entry->ldev;
+                       stat = netif_rx(skb_cp);
+                       if (stat == NET_RX_DROP)
+                               break;
+               }
+       rcu_read_unlock();
+
+       consume_skb(skb);
+
+       return stat;
+}
+
+static int
+iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+{
+       u8 iphc0, iphc1;
+       struct ieee802154_addr_sa sa, da;
+       void *sap, *dap;
+
+       raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
+       /* at least two bytes will be used for the encoding */
+       if (skb->len < 2)
+               return -EINVAL;
+
+       if (lowpan_fetch_skb_u8(skb, &iphc0))
+               return -EINVAL;
+
+       if (lowpan_fetch_skb_u8(skb, &iphc1))
+               return -EINVAL;
+
+       ieee802154_addr_to_sa(&sa, &hdr->source);
+       ieee802154_addr_to_sa(&da, &hdr->dest);
+
+       if (sa.addr_type == IEEE802154_ADDR_SHORT)
+               sap = &sa.short_addr;
+       else
+               sap = &sa.hwaddr;
+
+       if (da.addr_type == IEEE802154_ADDR_SHORT)
+               dap = &da.short_addr;
+       else
+               dap = &da.hwaddr;
+
+       return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
+                                       IEEE802154_ADDR_LEN, dap, da.addr_type,
+                                       IEEE802154_ADDR_LEN, iphc0, iphc1);
+}
+
+static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
+                     struct packet_type *pt, struct net_device *orig_dev)
+{
+       struct ieee802154_hdr hdr;
+       int ret;
+
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               goto drop;
+
+       if (!netif_running(dev))
+               goto drop_skb;
+
+       if (skb->pkt_type == PACKET_OTHERHOST)
+               goto drop_skb;
+
+       if (dev->type != ARPHRD_IEEE802154)
+               goto drop_skb;
+
+       if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
+               goto drop_skb;
+
+       /* check that it's our buffer */
+       if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
+               /* Pull off the 1-byte of 6lowpan header. */
+               skb_pull(skb, 1);
+               return lowpan_give_skb_to_devices(skb, NULL);
+       } else {
+               switch (skb->data[0] & 0xe0) {
+               case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
+                       ret = iphc_decompress(skb, &hdr);
+                       if (ret < 0)
+                               goto drop_skb;
+
+                       return lowpan_give_skb_to_devices(skb, NULL);
+               case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
+                       ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
+                       if (ret == 1) {
+                               ret = iphc_decompress(skb, &hdr);
+                               if (ret < 0)
+                                       goto drop_skb;
+
+                               return lowpan_give_skb_to_devices(skb, NULL);
+                       } else if (ret == -1) {
+                               return NET_RX_DROP;
+                       } else {
+                               return NET_RX_SUCCESS;
+                       }
+               case LOWPAN_DISPATCH_FRAGN:     /* next fragments headers */
+                       ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
+                       if (ret == 1) {
+                               ret = iphc_decompress(skb, &hdr);
+                               if (ret < 0)
+                                       goto drop_skb;
+
+                               return lowpan_give_skb_to_devices(skb, NULL);
+                       } else if (ret == -1) {
+                               return NET_RX_DROP;
+                       } else {
+                               return NET_RX_SUCCESS;
+                       }
+               default:
+                       break;
+               }
+       }
+
+drop_skb:
+       kfree_skb(skb);
+drop:
+       return NET_RX_DROP;
+}
+
+static struct packet_type lowpan_packet_type = {
+       .type = htons(ETH_P_IEEE802154),
+       .func = lowpan_rcv,
+};
+
+void lowpan_rx_init(void)
+{
+       dev_add_pack(&lowpan_packet_type);
+}
+
+void lowpan_rx_exit(void)
+{
+       dev_remove_pack(&lowpan_packet_type);
+}
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
new file mode 100644 (file)
index 0000000..2349070
--- /dev/null
@@ -0,0 +1,271 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/6lowpan.h>
+#include <net/ieee802154_netdev.h>
+
+#include "6lowpan_i.h"
+
+/* don't save pan id, it's intra pan */
+struct lowpan_addr {
+       u8 mode;
+       union {
+               /* IPv6 needs big endian here */
+               __be64 extended_addr;
+               __be16 short_addr;
+       } u;
+};
+
+struct lowpan_addr_info {
+       struct lowpan_addr daddr;
+       struct lowpan_addr saddr;
+};
+
+static inline struct
+lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
+{
+       WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
+       return (struct lowpan_addr_info *)(skb->data -
+                       sizeof(struct lowpan_addr_info));
+}
+
+int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+                        unsigned short type, const void *_daddr,
+                        const void *_saddr, unsigned int len)
+{
+       const u8 *saddr = _saddr;
+       const u8 *daddr = _daddr;
+       struct lowpan_addr_info *info;
+
+       /* TODO:
+        * if this package isn't ipv6 one, where should it be routed?
+        */
+       if (type != ETH_P_IPV6)
+               return 0;
+
+       if (!saddr)
+               saddr = dev->dev_addr;
+
+       raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
+       raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
+
+       info = lowpan_skb_priv(skb);
+
+       /* TODO: Currently we only support extended_addr */
+       info->daddr.mode = IEEE802154_ADDR_LONG;
+       memcpy(&info->daddr.u.extended_addr, daddr,
+              sizeof(info->daddr.u.extended_addr));
+       info->saddr.mode = IEEE802154_ADDR_LONG;
+       memcpy(&info->saddr.u.extended_addr, saddr,
+              sizeof(info->daddr.u.extended_addr));
+
+       return 0;
+}
+
+static struct sk_buff*
+lowpan_alloc_frag(struct sk_buff *skb, int size,
+                 const struct ieee802154_hdr *master_hdr)
+{
+       struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
+       struct sk_buff *frag;
+       int rc;
+
+       frag = alloc_skb(real_dev->hard_header_len +
+                        real_dev->needed_tailroom + size,
+                        GFP_ATOMIC);
+
+       if (likely(frag)) {
+               frag->dev = real_dev;
+               frag->priority = skb->priority;
+               skb_reserve(frag, real_dev->hard_header_len);
+               skb_reset_network_header(frag);
+               *mac_cb(frag) = *mac_cb(skb);
+
+               rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
+                                    &master_hdr->source, size);
+               if (rc < 0) {
+                       kfree_skb(frag);
+                       return ERR_PTR(rc);
+               }
+       } else {
+               frag = ERR_PTR(-ENOMEM);
+       }
+
+       return frag;
+}
+
+static int
+lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
+                    u8 *frag_hdr, int frag_hdrlen,
+                    int offset, int len)
+{
+       struct sk_buff *frag;
+
+       raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
+
+       frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
+       if (IS_ERR(frag))
+               return -PTR_ERR(frag);
+
+       memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
+       memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
+
+       raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
+
+       return dev_queue_xmit(frag);
+}
+
+static int
+lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
+                      const struct ieee802154_hdr *wpan_hdr)
+{
+       u16 dgram_size, dgram_offset;
+       __be16 frag_tag;
+       u8 frag_hdr[5];
+       int frag_cap, frag_len, payload_cap, rc;
+       int skb_unprocessed, skb_offset;
+
+       dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
+                    skb->mac_len;
+       frag_tag = htons(lowpan_dev_info(dev)->fragment_tag);
+       lowpan_dev_info(dev)->fragment_tag++;
+
+       frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
+       frag_hdr[1] = dgram_size & 0xff;
+       memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
+
+       payload_cap = ieee802154_max_payload(wpan_hdr);
+
+       frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
+                             skb_network_header_len(skb), 8);
+
+       skb_offset = skb_network_header_len(skb);
+       skb_unprocessed = skb->len - skb->mac_len - skb_offset;
+
+       rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
+                                 LOWPAN_FRAG1_HEAD_SIZE, 0,
+                                 frag_len + skb_network_header_len(skb));
+       if (rc) {
+               pr_debug("%s unable to send FRAG1 packet (tag: %d)",
+                        __func__, ntohs(frag_tag));
+               goto err;
+       }
+
+       frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
+       frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
+       frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
+
+       do {
+               dgram_offset += frag_len;
+               skb_offset += frag_len;
+               skb_unprocessed -= frag_len;
+               frag_len = min(frag_cap, skb_unprocessed);
+
+               frag_hdr[4] = dgram_offset >> 3;
+
+               rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
+                                         LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
+                                         frag_len);
+               if (rc) {
+                       pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
+                                __func__, ntohs(frag_tag), skb_offset);
+                       goto err;
+               }
+       } while (skb_unprocessed > frag_cap);
+
+       consume_skb(skb);
+       return NET_XMIT_SUCCESS;
+
+err:
+       kfree_skb(skb);
+       return rc;
+}
+
+static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ieee802154_addr sa, da;
+       struct ieee802154_mac_cb *cb = mac_cb_init(skb);
+       struct lowpan_addr_info info;
+       void *daddr, *saddr;
+
+       memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
+
+       /* TODO: Currently we only support extended_addr */
+       daddr = &info.daddr.u.extended_addr;
+       saddr = &info.saddr.u.extended_addr;
+
+       lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);
+
+       cb->type = IEEE802154_FC_TYPE_DATA;
+
+       /* prepare wpan address data */
+       sa.mode = IEEE802154_ADDR_LONG;
+       sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+       sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
+
+       /* intra-PAN communications */
+       da.pan_id = sa.pan_id;
+
+       /* if the destination address is the broadcast address, use the
+        * corresponding short address
+        */
+       if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
+               da.mode = IEEE802154_ADDR_SHORT;
+               da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+               cb->ackreq = false;
+       } else {
+               da.mode = IEEE802154_ADDR_LONG;
+               da.extended_addr = ieee802154_devaddr_from_raw(daddr);
+               cb->ackreq = true;
+       }
+
+       return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
+                       ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
+}
+
+netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ieee802154_hdr wpan_hdr;
+       int max_single, ret;
+
+       pr_debug("package xmit\n");
+
+       /* We must take a copy of the skb before we modify/replace the ipv6
+        * header as the header could be used elsewhere
+        */
+       skb = skb_unshare(skb, GFP_ATOMIC);
+       if (!skb)
+               return NET_XMIT_DROP;
+
+       ret = lowpan_header(skb, dev);
+       if (ret < 0) {
+               kfree_skb(skb);
+               return NET_XMIT_DROP;
+       }
+
+       if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
+               kfree_skb(skb);
+               return NET_XMIT_DROP;
+       }
+
+       max_single = ieee802154_max_payload(&wpan_hdr);
+
+       if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
+               skb->dev = lowpan_dev_info(dev)->real_dev;
+               return dev_queue_xmit(skb);
+       } else {
+               netdev_tx_t rc;
+
+               pr_debug("frame is too big, fragmentation is needed\n");
+               rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
+
+               return rc < 0 ? NET_XMIT_DROP : rc;
+       }
+}
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
deleted file mode 100644 (file)
index 27eaa65..0000000
+++ /dev/null
@@ -1,729 +0,0 @@
-/* Copyright 2011, Siemens AG
- * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
- */
-
-/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
- * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/* Jon's code is based on 6lowpan implementation for Contiki which is:
- * Copyright (c) 2008, Swedish Institute of Computer Science.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Institute nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <linux/bitops.h>
-#include <linux/if_arp.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/ieee802154.h>
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-#include <net/6lowpan.h>
-#include <net/ipv6.h>
-
-#include "reassembly.h"
-
-static LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
-
-/* private device info */
-struct lowpan_dev_info {
-       struct net_device       *real_dev; /* real WPAN device ptr */
-       struct mutex            dev_list_mtx; /* mutex for list ops */
-       u16                     fragment_tag;
-};
-
-struct lowpan_dev_record {
-       struct net_device *ldev;
-       struct list_head list;
-};
-
-/* don't save pan id, it's intra pan */
-struct lowpan_addr {
-       u8 mode;
-       union {
-               /* IPv6 needs big endian here */
-               __be64 extended_addr;
-               __be16 short_addr;
-       } u;
-};
-
-struct lowpan_addr_info {
-       struct lowpan_addr daddr;
-       struct lowpan_addr saddr;
-};
-
-static inline struct
-lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
-{
-       return netdev_priv(dev);
-}
-
-static inline struct
-lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
-{
-       WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
-       return (struct lowpan_addr_info *)(skb->data -
-                       sizeof(struct lowpan_addr_info));
-}
-
-static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
-                               unsigned short type, const void *_daddr,
-                               const void *_saddr, unsigned int len)
-{
-       const u8 *saddr = _saddr;
-       const u8 *daddr = _daddr;
-       struct lowpan_addr_info *info;
-
-       /* TODO:
-        * if this package isn't ipv6 one, where should it be routed?
-        */
-       if (type != ETH_P_IPV6)
-               return 0;
-
-       if (!saddr)
-               saddr = dev->dev_addr;
-
-       raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
-       raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
-
-       info = lowpan_skb_priv(skb);
-
-       /* TODO: Currently we only support extended_addr */
-       info->daddr.mode = IEEE802154_ADDR_LONG;
-       memcpy(&info->daddr.u.extended_addr, daddr,
-              sizeof(info->daddr.u.extended_addr));
-       info->saddr.mode = IEEE802154_ADDR_LONG;
-       memcpy(&info->saddr.u.extended_addr, saddr,
-              sizeof(info->daddr.u.extended_addr));
-
-       return 0;
-}
-
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
-                                     struct net_device *dev)
-{
-       struct lowpan_dev_record *entry;
-       struct sk_buff *skb_cp;
-       int stat = NET_RX_SUCCESS;
-
-       skb->protocol = htons(ETH_P_IPV6);
-       skb->pkt_type = PACKET_HOST;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(entry, &lowpan_devices, list)
-               if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
-                       skb_cp = skb_copy(skb, GFP_ATOMIC);
-                       if (!skb_cp) {
-                               kfree_skb(skb);
-                               rcu_read_unlock();
-                               return NET_RX_DROP;
-                       }
-
-                       skb_cp->dev = entry->ldev;
-                       stat = netif_rx(skb_cp);
-                       if (stat == NET_RX_DROP)
-                               break;
-               }
-       rcu_read_unlock();
-
-       consume_skb(skb);
-
-       return stat;
-}
-
-static int
-iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
-{
-       u8 iphc0, iphc1;
-       struct ieee802154_addr_sa sa, da;
-       void *sap, *dap;
-
-       raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
-       /* at least two bytes will be used for the encoding */
-       if (skb->len < 2)
-               return -EINVAL;
-
-       if (lowpan_fetch_skb_u8(skb, &iphc0))
-               return -EINVAL;
-
-       if (lowpan_fetch_skb_u8(skb, &iphc1))
-               return -EINVAL;
-
-       ieee802154_addr_to_sa(&sa, &hdr->source);
-       ieee802154_addr_to_sa(&da, &hdr->dest);
-
-       if (sa.addr_type == IEEE802154_ADDR_SHORT)
-               sap = &sa.short_addr;
-       else
-               sap = &sa.hwaddr;
-
-       if (da.addr_type == IEEE802154_ADDR_SHORT)
-               dap = &da.short_addr;
-       else
-               dap = &da.hwaddr;
-
-       return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
-                                       IEEE802154_ADDR_LEN, dap, da.addr_type,
-                                       IEEE802154_ADDR_LEN, iphc0, iphc1);
-}
-
-static struct sk_buff*
-lowpan_alloc_frag(struct sk_buff *skb, int size,
-                 const struct ieee802154_hdr *master_hdr)
-{
-       struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
-       struct sk_buff *frag;
-       int rc;
-
-       frag = alloc_skb(real_dev->hard_header_len +
-                        real_dev->needed_tailroom + size,
-                        GFP_ATOMIC);
-
-       if (likely(frag)) {
-               frag->dev = real_dev;
-               frag->priority = skb->priority;
-               skb_reserve(frag, real_dev->hard_header_len);
-               skb_reset_network_header(frag);
-               *mac_cb(frag) = *mac_cb(skb);
-
-               rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
-                                    &master_hdr->source, size);
-               if (rc < 0) {
-                       kfree_skb(frag);
-                       return ERR_PTR(rc);
-               }
-       } else {
-               frag = ERR_PTR(-ENOMEM);
-       }
-
-       return frag;
-}
-
-static int
-lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
-                    u8 *frag_hdr, int frag_hdrlen,
-                    int offset, int len)
-{
-       struct sk_buff *frag;
-
-       raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
-
-       frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
-       if (IS_ERR(frag))
-               return -PTR_ERR(frag);
-
-       memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
-       memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
-
-       raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
-
-       return dev_queue_xmit(frag);
-}
-
-static int
-lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
-                      const struct ieee802154_hdr *wpan_hdr)
-{
-       u16 dgram_size, dgram_offset;
-       __be16 frag_tag;
-       u8 frag_hdr[5];
-       int frag_cap, frag_len, payload_cap, rc;
-       int skb_unprocessed, skb_offset;
-
-       dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
-                    skb->mac_len;
-       frag_tag = htons(lowpan_dev_info(dev)->fragment_tag);
-       lowpan_dev_info(dev)->fragment_tag++;
-
-       frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
-       frag_hdr[1] = dgram_size & 0xff;
-       memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
-
-       payload_cap = ieee802154_max_payload(wpan_hdr);
-
-       frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
-                             skb_network_header_len(skb), 8);
-
-       skb_offset = skb_network_header_len(skb);
-       skb_unprocessed = skb->len - skb->mac_len - skb_offset;
-
-       rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
-                                 LOWPAN_FRAG1_HEAD_SIZE, 0,
-                                 frag_len + skb_network_header_len(skb));
-       if (rc) {
-               pr_debug("%s unable to send FRAG1 packet (tag: %d)",
-                        __func__, ntohs(frag_tag));
-               goto err;
-       }
-
-       frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
-       frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
-       frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
-
-       do {
-               dgram_offset += frag_len;
-               skb_offset += frag_len;
-               skb_unprocessed -= frag_len;
-               frag_len = min(frag_cap, skb_unprocessed);
-
-               frag_hdr[4] = dgram_offset >> 3;
-
-               rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
-                                         LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
-                                         frag_len);
-               if (rc) {
-                       pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
-                                __func__, ntohs(frag_tag), skb_offset);
-                       goto err;
-               }
-       } while (skb_unprocessed > frag_cap);
-
-       consume_skb(skb);
-       return NET_XMIT_SUCCESS;
-
-err:
-       kfree_skb(skb);
-       return rc;
-}
-
-static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
-{
-       struct ieee802154_addr sa, da;
-       struct ieee802154_mac_cb *cb = mac_cb_init(skb);
-       struct lowpan_addr_info info;
-       void *daddr, *saddr;
-
-       memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
-
-       /* TODO: Currently we only support extended_addr */
-       daddr = &info.daddr.u.extended_addr;
-       saddr = &info.saddr.u.extended_addr;
-
-       lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);
-
-       cb->type = IEEE802154_FC_TYPE_DATA;
-
-       /* prepare wpan address data */
-       sa.mode = IEEE802154_ADDR_LONG;
-       sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
-       sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
-
-       /* intra-PAN communications */
-       da.pan_id = sa.pan_id;
-
-       /* if the destination address is the broadcast address, use the
-        * corresponding short address
-        */
-       if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
-               da.mode = IEEE802154_ADDR_SHORT;
-               da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
-               cb->ackreq = false;
-       } else {
-               da.mode = IEEE802154_ADDR_LONG;
-               da.extended_addr = ieee802154_devaddr_from_raw(daddr);
-               cb->ackreq = true;
-       }
-
-       return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
-                       ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
-}
-
-static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct ieee802154_hdr wpan_hdr;
-       int max_single, ret;
-
-       pr_debug("package xmit\n");
-
-       /* We must take a copy of the skb before we modify/replace the ipv6
-        * header as the header could be used elsewhere
-        */
-       skb = skb_unshare(skb, GFP_ATOMIC);
-       if (!skb)
-               return NET_XMIT_DROP;
-
-       ret = lowpan_header(skb, dev);
-       if (ret < 0) {
-               kfree_skb(skb);
-               return NET_XMIT_DROP;
-       }
-
-       if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
-               kfree_skb(skb);
-               return NET_XMIT_DROP;
-       }
-
-       max_single = ieee802154_max_payload(&wpan_hdr);
-
-       if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
-               skb->dev = lowpan_dev_info(dev)->real_dev;
-               return dev_queue_xmit(skb);
-       } else {
-               netdev_tx_t rc;
-
-               pr_debug("frame is too big, fragmentation is needed\n");
-               rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
-
-               return rc < 0 ? NET_XMIT_DROP : rc;
-       }
-}
-
-static __le16 lowpan_get_pan_id(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-       return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
-}
-
-static __le16 lowpan_get_short_addr(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-       return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
-}
-
-static u8 lowpan_get_dsn(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-       return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
-}
-
-static struct header_ops lowpan_header_ops = {
-       .create = lowpan_header_create,
-};
-
-static struct lock_class_key lowpan_tx_busylock;
-static struct lock_class_key lowpan_netdev_xmit_lock_key;
-
-static void lowpan_set_lockdep_class_one(struct net_device *dev,
-                                        struct netdev_queue *txq,
-                                        void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock,
-                         &lowpan_netdev_xmit_lock_key);
-}
-
-static int lowpan_dev_init(struct net_device *dev)
-{
-       netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
-       dev->qdisc_tx_busylock = &lowpan_tx_busylock;
-       return 0;
-}
-
-static const struct net_device_ops lowpan_netdev_ops = {
-       .ndo_init               = lowpan_dev_init,
-       .ndo_start_xmit         = lowpan_xmit,
-};
-
-static struct ieee802154_mlme_ops lowpan_mlme = {
-       .get_pan_id = lowpan_get_pan_id,
-       .get_short_addr = lowpan_get_short_addr,
-       .get_dsn = lowpan_get_dsn,
-};
-
-static void lowpan_setup(struct net_device *dev)
-{
-       dev->addr_len           = IEEE802154_ADDR_LEN;
-       memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-       dev->type               = ARPHRD_IEEE802154;
-       /* Frame Control + Sequence Number + Address fields + Security Header */
-       dev->hard_header_len    = 2 + 1 + 20 + 14;
-       dev->needed_tailroom    = 2; /* FCS */
-       dev->mtu                = IPV6_MIN_MTU;
-       dev->tx_queue_len       = 0;
-       dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
-       dev->watchdog_timeo     = 0;
-
-       dev->netdev_ops         = &lowpan_netdev_ops;
-       dev->header_ops         = &lowpan_header_ops;
-       dev->ml_priv            = &lowpan_mlme;
-       dev->destructor         = free_netdev;
-}
-
-static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
-{
-       if (tb[IFLA_ADDRESS]) {
-               if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
-                       return -EINVAL;
-       }
-       return 0;
-}
-
-static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
-                     struct packet_type *pt, struct net_device *orig_dev)
-{
-       struct ieee802154_hdr hdr;
-       int ret;
-
-       skb = skb_share_check(skb, GFP_ATOMIC);
-       if (!skb)
-               goto drop;
-
-       if (!netif_running(dev))
-               goto drop_skb;
-
-       if (skb->pkt_type == PACKET_OTHERHOST)
-               goto drop_skb;
-
-       if (dev->type != ARPHRD_IEEE802154)
-               goto drop_skb;
-
-       if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
-               goto drop_skb;
-
-       /* check that it's our buffer */
-       if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
-               /* Pull off the 1-byte of 6lowpan header. */
-               skb_pull(skb, 1);
-               return lowpan_give_skb_to_devices(skb, NULL);
-       } else {
-               switch (skb->data[0] & 0xe0) {
-               case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
-                       ret = iphc_decompress(skb, &hdr);
-                       if (ret < 0)
-                               goto drop_skb;
-
-                       return lowpan_give_skb_to_devices(skb, NULL);
-               case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
-                       ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
-                       if (ret == 1) {
-                               ret = iphc_decompress(skb, &hdr);
-                               if (ret < 0)
-                                       goto drop_skb;
-
-                               return lowpan_give_skb_to_devices(skb, NULL);
-                       } else if (ret == -1) {
-                               return NET_RX_DROP;
-                       } else {
-                               return NET_RX_SUCCESS;
-                       }
-               case LOWPAN_DISPATCH_FRAGN:     /* next fragments headers */
-                       ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
-                       if (ret == 1) {
-                               ret = iphc_decompress(skb, &hdr);
-                               if (ret < 0)
-                                       goto drop_skb;
-
-                               return lowpan_give_skb_to_devices(skb, NULL);
-                       } else if (ret == -1) {
-                               return NET_RX_DROP;
-                       } else {
-                               return NET_RX_SUCCESS;
-                       }
-               default:
-                       break;
-               }
-       }
-
-drop_skb:
-       kfree_skb(skb);
-drop:
-       return NET_RX_DROP;
-}
-
-static struct packet_type lowpan_packet_type = {
-       .type = htons(ETH_P_IEEE802154),
-       .func = lowpan_rcv,
-};
-
-static int lowpan_newlink(struct net *src_net, struct net_device *dev,
-                         struct nlattr *tb[], struct nlattr *data[])
-{
-       struct net_device *real_dev;
-       struct lowpan_dev_record *entry;
-       int ret;
-
-       ASSERT_RTNL();
-
-       pr_debug("adding new link\n");
-
-       if (!tb[IFLA_LINK])
-               return -EINVAL;
-       /* find and hold real wpan device */
-       real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
-       if (!real_dev)
-               return -ENODEV;
-       if (real_dev->type != ARPHRD_IEEE802154) {
-               dev_put(real_dev);
-               return -EINVAL;
-       }
-
-       lowpan_dev_info(dev)->real_dev = real_dev;
-       mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry) {
-               dev_put(real_dev);
-               lowpan_dev_info(dev)->real_dev = NULL;
-               return -ENOMEM;
-       }
-
-       entry->ldev = dev;
-
-       /* Set the lowpan hardware address to the wpan hardware address. */
-       memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
-
-       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-       INIT_LIST_HEAD(&entry->list);
-       list_add_tail(&entry->list, &lowpan_devices);
-       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       ret = register_netdevice(dev);
-       if (ret >= 0) {
-               if (!lowpan_open_count)
-                       dev_add_pack(&lowpan_packet_type);
-               lowpan_open_count++;
-       }
-
-       return ret;
-}
-
-static void lowpan_dellink(struct net_device *dev, struct list_head *head)
-{
-       struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
-       struct net_device *real_dev = lowpan_dev->real_dev;
-       struct lowpan_dev_record *entry, *tmp;
-
-       ASSERT_RTNL();
-
-       lowpan_open_count--;
-       if (!lowpan_open_count)
-               dev_remove_pack(&lowpan_packet_type);
-
-       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-       list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-               if (entry->ldev == dev) {
-                       list_del(&entry->list);
-                       kfree(entry);
-               }
-       }
-       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       unregister_netdevice_queue(dev, head);
-
-       dev_put(real_dev);
-}
-
-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
-       .kind           = "lowpan",
-       .priv_size      = sizeof(struct lowpan_dev_info),
-       .setup          = lowpan_setup,
-       .newlink        = lowpan_newlink,
-       .dellink        = lowpan_dellink,
-       .validate       = lowpan_validate,
-};
-
-static inline int __init lowpan_netlink_init(void)
-{
-       return rtnl_link_register(&lowpan_link_ops);
-}
-
-static inline void lowpan_netlink_fini(void)
-{
-       rtnl_link_unregister(&lowpan_link_ops);
-}
-
-static int lowpan_device_event(struct notifier_block *unused,
-                              unsigned long event, void *ptr)
-{
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       LIST_HEAD(del_list);
-       struct lowpan_dev_record *entry, *tmp;
-
-       if (dev->type != ARPHRD_IEEE802154)
-               goto out;
-
-       if (event == NETDEV_UNREGISTER) {
-               list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-                       if (lowpan_dev_info(entry->ldev)->real_dev == dev)
-                               lowpan_dellink(entry->ldev, &del_list);
-               }
-
-               unregister_netdevice_many(&del_list);
-       }
-
-out:
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block lowpan_dev_notifier = {
-       .notifier_call = lowpan_device_event,
-};
-
-static int __init lowpan_init_module(void)
-{
-       int err = 0;
-
-       err = lowpan_net_frag_init();
-       if (err < 0)
-               goto out;
-
-       err = lowpan_netlink_init();
-       if (err < 0)
-               goto out_frag;
-
-       err = register_netdevice_notifier(&lowpan_dev_notifier);
-       if (err < 0)
-               goto out_pack;
-
-       return 0;
-
-out_pack:
-       lowpan_netlink_fini();
-out_frag:
-       lowpan_net_frag_exit();
-out:
-       return err;
-}
-
-static void __exit lowpan_cleanup_module(void)
-{
-       lowpan_netlink_fini();
-
-       lowpan_net_frag_exit();
-
-       unregister_netdevice_notifier(&lowpan_dev_notifier);
-}
-
-module_init(lowpan_init_module);
-module_exit(lowpan_cleanup_module);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_RTNL_LINK("lowpan");
index c0d4154d144f1eeac5418a048a0ccded798c3419..1370d5b0041b952cf74c272c2494f8c0150c893b 100644 (file)
@@ -1,4 +1,4 @@
-config IEEE802154
+menuconfig IEEE802154
        tristate "IEEE Std 802.15.4 Low-Rate Wireless Personal Area Networks support"
        ---help---
          IEEE Std 802.15.4 defines a low data rate, low power and low
@@ -10,8 +10,16 @@ config IEEE802154
          Say Y here to compile LR-WPAN support into the kernel or say M to
          compile it as modules.
 
-config IEEE802154_6LOWPAN
-       tristate "6lowpan support over IEEE 802.15.4"
-       depends on IEEE802154 && 6LOWPAN
+if IEEE802154
+
+config IEEE802154_SOCKET
+       tristate "IEEE 802.15.4 socket interface"
+       default y
        ---help---
-         IPv6 compression over IEEE 802.15.4.
+         Socket interface for IEEE 802.15.4. Contains DGRAM sockets interface
+         for 802.15.4 dataframes. Also RAW socket interface to build MAC
+         header from userspace.
+
+source "net/ieee802154/6lowpan/Kconfig"
+
+endif
index 9f6970f2a28b9e9d6840ccbef56efdf3d19b2531..05dab2957cd49e9be95c6bbe1cef1eb9f35ab8a2 100644 (file)
@@ -1,9 +1,9 @@
-obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
-obj-$(CONFIG_IEEE802154_6LOWPAN) += ieee802154_6lowpan.o
+obj-$(CONFIG_IEEE802154) += ieee802154.o
+obj-$(CONFIG_IEEE802154_SOCKET) += ieee802154_socket.o
+obj-y += 6lowpan/
 
-ieee802154_6lowpan-y := 6lowpan_rtnl.o reassembly.o
 ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \
                 header_ops.o sysfs.o nl802154.o
-af_802154-y := af_ieee802154.o raw.o dgram.o
+ieee802154_socket-y := socket.o
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/ieee802154/af802154.h b/net/ieee802154/af802154.h
deleted file mode 100644 (file)
index 343b63e..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Internal interfaces for ieee 802.15.4 address family.
- *
- * Copyright 2007, 2008, 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Written by:
- * Sergey Lapin <slapin@ossfans.org>
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- */
-
-#ifndef AF802154_H
-#define AF802154_H
-
-struct sk_buff;
-struct net_device;
-struct ieee802154_addr;
-extern struct proto ieee802154_raw_prot;
-extern struct proto ieee802154_dgram_prot;
-void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb);
-int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb);
-struct net_device *ieee802154_get_dev(struct net *net,
-                                     const struct ieee802154_addr *addr);
-
-#endif
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
deleted file mode 100644 (file)
index d0a1282..0000000
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * IEEE802154.4 socket interface
- *
- * Copyright 2007, 2008 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Written by:
- * Sergey Lapin <slapin@ossfans.org>
- * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
- */
-
-#include <linux/net.h>
-#include <linux/capability.h>
-#include <linux/module.h>
-#include <linux/if_arp.h>
-#include <linux/if.h>
-#include <linux/termios.h>     /* For TIOCOUTQ/INQ */
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <net/datalink.h>
-#include <net/psnap.h>
-#include <net/sock.h>
-#include <net/tcp_states.h>
-#include <net/route.h>
-
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-
-#include "af802154.h"
-
-/* Utility function for families */
-struct net_device*
-ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
-{
-       struct net_device *dev = NULL;
-       struct net_device *tmp;
-       __le16 pan_id, short_addr;
-       u8 hwaddr[IEEE802154_ADDR_LEN];
-
-       switch (addr->mode) {
-       case IEEE802154_ADDR_LONG:
-               ieee802154_devaddr_to_raw(hwaddr, addr->extended_addr);
-               rcu_read_lock();
-               dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, hwaddr);
-               if (dev)
-                       dev_hold(dev);
-               rcu_read_unlock();
-               break;
-       case IEEE802154_ADDR_SHORT:
-               if (addr->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST) ||
-                   addr->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
-                   addr->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST))
-                       break;
-
-               rtnl_lock();
-
-               for_each_netdev(net, tmp) {
-                       if (tmp->type != ARPHRD_IEEE802154)
-                               continue;
-
-                       pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
-                       short_addr =
-                               ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
-
-                       if (pan_id == addr->pan_id &&
-                           short_addr == addr->short_addr) {
-                               dev = tmp;
-                               dev_hold(dev);
-                               break;
-                       }
-               }
-
-               rtnl_unlock();
-               break;
-       default:
-               pr_warn("Unsupported ieee802154 address type: %d\n",
-                       addr->mode);
-               break;
-       }
-
-       return dev;
-}
-
-static int ieee802154_sock_release(struct socket *sock)
-{
-       struct sock *sk = sock->sk;
-
-       if (sk) {
-               sock->sk = NULL;
-               sk->sk_prot->close(sk, 0);
-       }
-       return 0;
-}
-
-static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-                                  struct msghdr *msg, size_t len)
-{
-       struct sock *sk = sock->sk;
-
-       return sk->sk_prot->sendmsg(iocb, sk, msg, len);
-}
-
-static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
-                               int addr_len)
-{
-       struct sock *sk = sock->sk;
-
-       if (sk->sk_prot->bind)
-               return sk->sk_prot->bind(sk, uaddr, addr_len);
-
-       return sock_no_bind(sock, uaddr, addr_len);
-}
-
-static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
-                                  int addr_len, int flags)
-{
-       struct sock *sk = sock->sk;
-
-       if (addr_len < sizeof(uaddr->sa_family))
-               return -EINVAL;
-
-       if (uaddr->sa_family == AF_UNSPEC)
-               return sk->sk_prot->disconnect(sk, flags);
-
-       return sk->sk_prot->connect(sk, uaddr, addr_len);
-}
-
-static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
-                               unsigned int cmd)
-{
-       struct ifreq ifr;
-       int ret = -ENOIOCTLCMD;
-       struct net_device *dev;
-
-       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-               return -EFAULT;
-
-       ifr.ifr_name[IFNAMSIZ-1] = 0;
-
-       dev_load(sock_net(sk), ifr.ifr_name);
-       dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
-
-       if (!dev)
-               return -ENODEV;
-
-       if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
-               ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
-
-       if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-               ret = -EFAULT;
-       dev_put(dev);
-
-       return ret;
-}
-
-static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
-                                unsigned long arg)
-{
-       struct sock *sk = sock->sk;
-
-       switch (cmd) {
-       case SIOCGSTAMP:
-               return sock_get_timestamp(sk, (struct timeval __user *)arg);
-       case SIOCGSTAMPNS:
-               return sock_get_timestampns(sk, (struct timespec __user *)arg);
-       case SIOCGIFADDR:
-       case SIOCSIFADDR:
-               return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg,
-                               cmd);
-       default:
-               if (!sk->sk_prot->ioctl)
-                       return -ENOIOCTLCMD;
-               return sk->sk_prot->ioctl(sk, cmd, arg);
-       }
-}
-
-static const struct proto_ops ieee802154_raw_ops = {
-       .family            = PF_IEEE802154,
-       .owner             = THIS_MODULE,
-       .release           = ieee802154_sock_release,
-       .bind              = ieee802154_sock_bind,
-       .connect           = ieee802154_sock_connect,
-       .socketpair        = sock_no_socketpair,
-       .accept            = sock_no_accept,
-       .getname           = sock_no_getname,
-       .poll              = datagram_poll,
-       .ioctl             = ieee802154_sock_ioctl,
-       .listen            = sock_no_listen,
-       .shutdown          = sock_no_shutdown,
-       .setsockopt        = sock_common_setsockopt,
-       .getsockopt        = sock_common_getsockopt,
-       .sendmsg           = ieee802154_sock_sendmsg,
-       .recvmsg           = sock_common_recvmsg,
-       .mmap              = sock_no_mmap,
-       .sendpage          = sock_no_sendpage,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
-};
-
-static const struct proto_ops ieee802154_dgram_ops = {
-       .family            = PF_IEEE802154,
-       .owner             = THIS_MODULE,
-       .release           = ieee802154_sock_release,
-       .bind              = ieee802154_sock_bind,
-       .connect           = ieee802154_sock_connect,
-       .socketpair        = sock_no_socketpair,
-       .accept            = sock_no_accept,
-       .getname           = sock_no_getname,
-       .poll              = datagram_poll,
-       .ioctl             = ieee802154_sock_ioctl,
-       .listen            = sock_no_listen,
-       .shutdown          = sock_no_shutdown,
-       .setsockopt        = sock_common_setsockopt,
-       .getsockopt        = sock_common_getsockopt,
-       .sendmsg           = ieee802154_sock_sendmsg,
-       .recvmsg           = sock_common_recvmsg,
-       .mmap              = sock_no_mmap,
-       .sendpage          = sock_no_sendpage,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
-};
-
-/* Create a socket. Initialise the socket, blank the addresses
- * set the state.
- */
-static int ieee802154_create(struct net *net, struct socket *sock,
-                            int protocol, int kern)
-{
-       struct sock *sk;
-       int rc;
-       struct proto *proto;
-       const struct proto_ops *ops;
-
-       if (!net_eq(net, &init_net))
-               return -EAFNOSUPPORT;
-
-       switch (sock->type) {
-       case SOCK_RAW:
-               proto = &ieee802154_raw_prot;
-               ops = &ieee802154_raw_ops;
-               break;
-       case SOCK_DGRAM:
-               proto = &ieee802154_dgram_prot;
-               ops = &ieee802154_dgram_ops;
-               break;
-       default:
-               rc = -ESOCKTNOSUPPORT;
-               goto out;
-       }
-
-       rc = -ENOMEM;
-       sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
-       if (!sk)
-               goto out;
-       rc = 0;
-
-       sock->ops = ops;
-
-       sock_init_data(sock, sk);
-       /* FIXME: sk->sk_destruct */
-       sk->sk_family = PF_IEEE802154;
-
-       /* Checksums on by default */
-       sock_set_flag(sk, SOCK_ZAPPED);
-
-       if (sk->sk_prot->hash)
-               sk->sk_prot->hash(sk);
-
-       if (sk->sk_prot->init) {
-               rc = sk->sk_prot->init(sk);
-               if (rc)
-                       sk_common_release(sk);
-       }
-out:
-       return rc;
-}
-
-static const struct net_proto_family ieee802154_family_ops = {
-       .family         = PF_IEEE802154,
-       .create         = ieee802154_create,
-       .owner          = THIS_MODULE,
-};
-
-static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
-                         struct packet_type *pt, struct net_device *orig_dev)
-{
-       if (!netif_running(dev))
-               goto drop;
-       pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
-#ifdef DEBUG
-       print_hex_dump_bytes("ieee802154_rcv ",
-                            DUMP_PREFIX_NONE, skb->data, skb->len);
-#endif
-
-       if (!net_eq(dev_net(dev), &init_net))
-               goto drop;
-
-       ieee802154_raw_deliver(dev, skb);
-
-       if (dev->type != ARPHRD_IEEE802154)
-               goto drop;
-
-       if (skb->pkt_type != PACKET_OTHERHOST)
-               return ieee802154_dgram_deliver(dev, skb);
-
-drop:
-       kfree_skb(skb);
-       return NET_RX_DROP;
-}
-
-static struct packet_type ieee802154_packet_type = {
-       .type = htons(ETH_P_IEEE802154),
-       .func = ieee802154_rcv,
-};
-
-static int __init af_ieee802154_init(void)
-{
-       int rc = -EINVAL;
-
-       rc = proto_register(&ieee802154_raw_prot, 1);
-       if (rc)
-               goto out;
-
-       rc = proto_register(&ieee802154_dgram_prot, 1);
-       if (rc)
-               goto err_dgram;
-
-       /* Tell SOCKET that we are alive */
-       rc = sock_register(&ieee802154_family_ops);
-       if (rc)
-               goto err_sock;
-       dev_add_pack(&ieee802154_packet_type);
-
-       rc = 0;
-       goto out;
-
-err_sock:
-       proto_unregister(&ieee802154_dgram_prot);
-err_dgram:
-       proto_unregister(&ieee802154_raw_prot);
-out:
-       return rc;
-}
-
-static void __exit af_ieee802154_remove(void)
-{
-       dev_remove_pack(&ieee802154_packet_type);
-       sock_unregister(PF_IEEE802154);
-       proto_unregister(&ieee802154_dgram_prot);
-       proto_unregister(&ieee802154_raw_prot);
-}
-
-module_init(af_ieee802154_init);
-module_exit(af_ieee802154_remove);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETPROTO(PF_IEEE802154);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
deleted file mode 100644 (file)
index d1930b7..0000000
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * IEEE 802.15.4 dgram socket interface
- *
- * Copyright 2007, 2008 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Written by:
- * Sergey Lapin <slapin@ossfans.org>
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- */
-
-#include <linux/capability.h>
-#include <linux/net.h>
-#include <linux/module.h>
-#include <linux/if_arp.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/ieee802154.h>
-#include <net/sock.h>
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-
-#include <asm/ioctls.h>
-
-#include "af802154.h"
-
-static HLIST_HEAD(dgram_head);
-static DEFINE_RWLOCK(dgram_lock);
-
-struct dgram_sock {
-       struct sock sk;
-
-       struct ieee802154_addr src_addr;
-       struct ieee802154_addr dst_addr;
-
-       unsigned int bound:1;
-       unsigned int connected:1;
-       unsigned int want_ack:1;
-       unsigned int secen:1;
-       unsigned int secen_override:1;
-       unsigned int seclevel:3;
-       unsigned int seclevel_override:1;
-};
-
-static inline struct dgram_sock *dgram_sk(const struct sock *sk)
-{
-       return container_of(sk, struct dgram_sock, sk);
-}
-
-static void dgram_hash(struct sock *sk)
-{
-       write_lock_bh(&dgram_lock);
-       sk_add_node(sk, &dgram_head);
-       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-       write_unlock_bh(&dgram_lock);
-}
-
-static void dgram_unhash(struct sock *sk)
-{
-       write_lock_bh(&dgram_lock);
-       if (sk_del_node_init(sk))
-               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-       write_unlock_bh(&dgram_lock);
-}
-
-static int dgram_init(struct sock *sk)
-{
-       struct dgram_sock *ro = dgram_sk(sk);
-
-       ro->want_ack = 1;
-       return 0;
-}
-
-static void dgram_close(struct sock *sk, long timeout)
-{
-       sk_common_release(sk);
-}
-
-static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
-{
-       struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
-       struct ieee802154_addr haddr;
-       struct dgram_sock *ro = dgram_sk(sk);
-       int err = -EINVAL;
-       struct net_device *dev;
-
-       lock_sock(sk);
-
-       ro->bound = 0;
-
-       if (len < sizeof(*addr))
-               goto out;
-
-       if (addr->family != AF_IEEE802154)
-               goto out;
-
-       ieee802154_addr_from_sa(&haddr, &addr->addr);
-       dev = ieee802154_get_dev(sock_net(sk), &haddr);
-       if (!dev) {
-               err = -ENODEV;
-               goto out;
-       }
-
-       if (dev->type != ARPHRD_IEEE802154) {
-               err = -ENODEV;
-               goto out_put;
-       }
-
-       ro->src_addr = haddr;
-
-       ro->bound = 1;
-       err = 0;
-out_put:
-       dev_put(dev);
-out:
-       release_sock(sk);
-
-       return err;
-}
-
-static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
-{
-       switch (cmd) {
-       case SIOCOUTQ:
-       {
-               int amount = sk_wmem_alloc_get(sk);
-
-               return put_user(amount, (int __user *)arg);
-       }
-
-       case SIOCINQ:
-       {
-               struct sk_buff *skb;
-               unsigned long amount;
-
-               amount = 0;
-               spin_lock_bh(&sk->sk_receive_queue.lock);
-               skb = skb_peek(&sk->sk_receive_queue);
-               if (skb != NULL) {
-                       /* We will only return the amount
-                        * of this packet since that is all
-                        * that will be read.
-                        */
-                       amount = skb->len - ieee802154_hdr_length(skb);
-               }
-               spin_unlock_bh(&sk->sk_receive_queue.lock);
-               return put_user(amount, (int __user *)arg);
-       }
-       }
-
-       return -ENOIOCTLCMD;
-}
-
-/* FIXME: autobind */
-static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
-                        int len)
-{
-       struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
-       struct dgram_sock *ro = dgram_sk(sk);
-       int err = 0;
-
-       if (len < sizeof(*addr))
-               return -EINVAL;
-
-       if (addr->family != AF_IEEE802154)
-               return -EINVAL;
-
-       lock_sock(sk);
-
-       if (!ro->bound) {
-               err = -ENETUNREACH;
-               goto out;
-       }
-
-       ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
-       ro->connected = 1;
-
-out:
-       release_sock(sk);
-       return err;
-}
-
-static int dgram_disconnect(struct sock *sk, int flags)
-{
-       struct dgram_sock *ro = dgram_sk(sk);
-
-       lock_sock(sk);
-       ro->connected = 0;
-       release_sock(sk);
-
-       return 0;
-}
-
-static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
-                        struct msghdr *msg, size_t size)
-{
-       struct net_device *dev;
-       unsigned int mtu;
-       struct sk_buff *skb;
-       struct ieee802154_mac_cb *cb;
-       struct dgram_sock *ro = dgram_sk(sk);
-       struct ieee802154_addr dst_addr;
-       int hlen, tlen;
-       int err;
-
-       if (msg->msg_flags & MSG_OOB) {
-               pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
-               return -EOPNOTSUPP;
-       }
-
-       if (!ro->connected && !msg->msg_name)
-               return -EDESTADDRREQ;
-       else if (ro->connected && msg->msg_name)
-               return -EISCONN;
-
-       if (!ro->bound)
-               dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
-       else
-               dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr);
-
-       if (!dev) {
-               pr_debug("no dev\n");
-               err = -ENXIO;
-               goto out;
-       }
-       mtu = dev->mtu;
-       pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
-
-       if (size > mtu) {
-               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-               err = -EMSGSIZE;
-               goto out_dev;
-       }
-
-       hlen = LL_RESERVED_SPACE(dev);
-       tlen = dev->needed_tailroom;
-       skb = sock_alloc_send_skb(sk, hlen + tlen + size,
-                                 msg->msg_flags & MSG_DONTWAIT,
-                                 &err);
-       if (!skb)
-               goto out_dev;
-
-       skb_reserve(skb, hlen);
-
-       skb_reset_network_header(skb);
-
-       cb = mac_cb_init(skb);
-       cb->type = IEEE802154_FC_TYPE_DATA;
-       cb->ackreq = ro->want_ack;
-
-       if (msg->msg_name) {
-               DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
-                                daddr, msg->msg_name);
-
-               ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
-       } else {
-               dst_addr = ro->dst_addr;
-       }
-
-       cb->secen = ro->secen;
-       cb->secen_override = ro->secen_override;
-       cb->seclevel = ro->seclevel;
-       cb->seclevel_override = ro->seclevel_override;
-
-       err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
-                             ro->bound ? &ro->src_addr : NULL, size);
-       if (err < 0)
-               goto out_skb;
-
-       err = memcpy_from_msg(skb_put(skb, size), msg, size);
-       if (err < 0)
-               goto out_skb;
-
-       skb->dev = dev;
-       skb->sk  = sk;
-       skb->protocol = htons(ETH_P_IEEE802154);
-
-       dev_put(dev);
-
-       err = dev_queue_xmit(skb);
-       if (err > 0)
-               err = net_xmit_errno(err);
-
-       return err ?: size;
-
-out_skb:
-       kfree_skb(skb);
-out_dev:
-       dev_put(dev);
-out:
-       return err;
-}
-
-static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
-                        struct msghdr *msg, size_t len, int noblock,
-                        int flags, int *addr_len)
-{
-       size_t copied = 0;
-       int err = -EOPNOTSUPP;
-       struct sk_buff *skb;
-       DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name);
-
-       skb = skb_recv_datagram(sk, flags, noblock, &err);
-       if (!skb)
-               goto out;
-
-       copied = skb->len;
-       if (len < copied) {
-               msg->msg_flags |= MSG_TRUNC;
-               copied = len;
-       }
-
-       /* FIXME: skip headers if necessary ?! */
-       err = skb_copy_datagram_msg(skb, 0, msg, copied);
-       if (err)
-               goto done;
-
-       sock_recv_ts_and_drops(msg, sk, skb);
-
-       if (saddr) {
-               saddr->family = AF_IEEE802154;
-               ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
-               *addr_len = sizeof(*saddr);
-       }
-
-       if (flags & MSG_TRUNC)
-               copied = skb->len;
-done:
-       skb_free_datagram(sk, skb);
-out:
-       if (err)
-               return err;
-       return copied;
-}
-
-static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
-{
-       skb = skb_share_check(skb, GFP_ATOMIC);
-       if (!skb)
-               return NET_RX_DROP;
-
-       if (sock_queue_rcv_skb(sk, skb) < 0) {
-               kfree_skb(skb);
-               return NET_RX_DROP;
-       }
-
-       return NET_RX_SUCCESS;
-}
-
-static inline bool
-ieee802154_match_sock(__le64 hw_addr, __le16 pan_id, __le16 short_addr,
-                     struct dgram_sock *ro)
-{
-       if (!ro->bound)
-               return true;
-
-       if (ro->src_addr.mode == IEEE802154_ADDR_LONG &&
-           hw_addr == ro->src_addr.extended_addr)
-               return true;
-
-       if (ro->src_addr.mode == IEEE802154_ADDR_SHORT &&
-           pan_id == ro->src_addr.pan_id &&
-           short_addr == ro->src_addr.short_addr)
-               return true;
-
-       return false;
-}
-
-int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
-{
-       struct sock *sk, *prev = NULL;
-       int ret = NET_RX_SUCCESS;
-       __le16 pan_id, short_addr;
-       __le64 hw_addr;
-
-       /* Data frame processing */
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
-       short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
-       hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
-
-       read_lock(&dgram_lock);
-       sk_for_each(sk, &dgram_head) {
-               if (ieee802154_match_sock(hw_addr, pan_id, short_addr,
-                                         dgram_sk(sk))) {
-                       if (prev) {
-                               struct sk_buff *clone;
-
-                               clone = skb_clone(skb, GFP_ATOMIC);
-                               if (clone)
-                                       dgram_rcv_skb(prev, clone);
-                       }
-
-                       prev = sk;
-               }
-       }
-
-       if (prev) {
-               dgram_rcv_skb(prev, skb);
-       } else {
-               kfree_skb(skb);
-               ret = NET_RX_DROP;
-       }
-       read_unlock(&dgram_lock);
-
-       return ret;
-}
-
-static int dgram_getsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, int __user *optlen)
-{
-       struct dgram_sock *ro = dgram_sk(sk);
-
-       int val, len;
-
-       if (level != SOL_IEEE802154)
-               return -EOPNOTSUPP;
-
-       if (get_user(len, optlen))
-               return -EFAULT;
-
-       len = min_t(unsigned int, len, sizeof(int));
-
-       switch (optname) {
-       case WPAN_WANTACK:
-               val = ro->want_ack;
-               break;
-       case WPAN_SECURITY:
-               if (!ro->secen_override)
-                       val = WPAN_SECURITY_DEFAULT;
-               else if (ro->secen)
-                       val = WPAN_SECURITY_ON;
-               else
-                       val = WPAN_SECURITY_OFF;
-               break;
-       case WPAN_SECURITY_LEVEL:
-               if (!ro->seclevel_override)
-                       val = WPAN_SECURITY_LEVEL_DEFAULT;
-               else
-                       val = ro->seclevel;
-               break;
-       default:
-               return -ENOPROTOOPT;
-       }
-
-       if (put_user(len, optlen))
-               return -EFAULT;
-       if (copy_to_user(optval, &val, len))
-               return -EFAULT;
-       return 0;
-}
-
-static int dgram_setsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, unsigned int optlen)
-{
-       struct dgram_sock *ro = dgram_sk(sk);
-       struct net *net = sock_net(sk);
-       int val;
-       int err = 0;
-
-       if (optlen < sizeof(int))
-               return -EINVAL;
-
-       if (get_user(val, (int __user *)optval))
-               return -EFAULT;
-
-       lock_sock(sk);
-
-       switch (optname) {
-       case WPAN_WANTACK:
-               ro->want_ack = !!val;
-               break;
-       case WPAN_SECURITY:
-               if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
-                   !ns_capable(net->user_ns, CAP_NET_RAW)) {
-                       err = -EPERM;
-                       break;
-               }
-
-               switch (val) {
-               case WPAN_SECURITY_DEFAULT:
-                       ro->secen_override = 0;
-                       break;
-               case WPAN_SECURITY_ON:
-                       ro->secen_override = 1;
-                       ro->secen = 1;
-                       break;
-               case WPAN_SECURITY_OFF:
-                       ro->secen_override = 1;
-                       ro->secen = 0;
-                       break;
-               default:
-                       err = -EINVAL;
-                       break;
-               }
-               break;
-       case WPAN_SECURITY_LEVEL:
-               if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
-                   !ns_capable(net->user_ns, CAP_NET_RAW)) {
-                       err = -EPERM;
-                       break;
-               }
-
-               if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
-                   val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
-                       err = -EINVAL;
-               } else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
-                       ro->seclevel_override = 0;
-               } else {
-                       ro->seclevel_override = 1;
-                       ro->seclevel = val;
-               }
-               break;
-       default:
-               err = -ENOPROTOOPT;
-               break;
-       }
-
-       release_sock(sk);
-       return err;
-}
-
-struct proto ieee802154_dgram_prot = {
-       .name           = "IEEE-802.15.4-MAC",
-       .owner          = THIS_MODULE,
-       .obj_size       = sizeof(struct dgram_sock),
-       .init           = dgram_init,
-       .close          = dgram_close,
-       .bind           = dgram_bind,
-       .sendmsg        = dgram_sendmsg,
-       .recvmsg        = dgram_recvmsg,
-       .hash           = dgram_hash,
-       .unhash         = dgram_unhash,
-       .connect        = dgram_connect,
-       .disconnect     = dgram_disconnect,
-       .ioctl          = dgram_ioctl,
-       .getsockopt     = dgram_getsockopt,
-       .setsockopt     = dgram_setsockopt,
-};
-
index fa1464762d0dafdc481b68a7cbf46cfd45d8b30a..c8133c07ceee4ce29411a5f9ea47c0c529231223 100644 (file)
@@ -63,13 +63,9 @@ int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group)
        struct nlmsghdr *nlh = nlmsg_hdr(msg);
        void *hdr = genlmsg_data(nlmsg_data(nlh));
 
-       if (genlmsg_end(msg, hdr) < 0)
-               goto out;
+       genlmsg_end(msg, hdr);
 
        return genlmsg_multicast(&nl802154_family, msg, 0, group, GFP_ATOMIC);
-out:
-       nlmsg_free(msg);
-       return -ENOBUFS;
 }
 
 struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
@@ -96,13 +92,9 @@ int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info)
        struct nlmsghdr *nlh = nlmsg_hdr(msg);
        void *hdr = genlmsg_data(nlmsg_data(nlh));
 
-       if (genlmsg_end(msg, hdr) < 0)
-               goto out;
+       genlmsg_end(msg, hdr);
 
        return genlmsg_reply(msg, info);
-out:
-       nlmsg_free(msg);
-       return -ENOBUFS;
 }
 
 static const struct genl_ops ieee8021154_ops[] = {
index 3c902e9516fb69dc94508ac1f77cda79d4095d16..9105265920fe735db77650607bd3fe89d143869a 100644 (file)
@@ -136,7 +136,8 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
        }
 
        wpan_phy_put(phy);
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
 nla_put_failure:
        wpan_phy_put(phy);
index 7baf98b146116bd52f961c371d31a33878c0b8d8..1b9d25f6e898616d7972950692bcd1eab71ddb26 100644 (file)
@@ -65,7 +65,8 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
                goto nla_put_failure;
        mutex_unlock(&phy->pib_lock);
        kfree(buf);
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
 nla_put_failure:
        mutex_unlock(&phy->pib_lock);
index a25b9bbd077be000d2e1de99bf795ac8e1fc0897..a4daf91b8d0a395d6964dad93cf292e566dee22c 100644 (file)
@@ -306,7 +306,8 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
                goto nla_put_failure;
 
 finish:
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -489,7 +490,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
        if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
                goto nla_put_failure;
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(msg, hdr);
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
deleted file mode 100644 (file)
index 1674b11..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Raw IEEE 802.15.4 sockets
- *
- * Copyright 2007, 2008 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Written by:
- * Sergey Lapin <slapin@ossfans.org>
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- */
-
-#include <linux/net.h>
-#include <linux/module.h>
-#include <linux/if_arp.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <net/sock.h>
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-
-#include "af802154.h"
-
-static HLIST_HEAD(raw_head);
-static DEFINE_RWLOCK(raw_lock);
-
-static void raw_hash(struct sock *sk)
-{
-       write_lock_bh(&raw_lock);
-       sk_add_node(sk, &raw_head);
-       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-       write_unlock_bh(&raw_lock);
-}
-
-static void raw_unhash(struct sock *sk)
-{
-       write_lock_bh(&raw_lock);
-       if (sk_del_node_init(sk))
-               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-       write_unlock_bh(&raw_lock);
-}
-
-static void raw_close(struct sock *sk, long timeout)
-{
-       sk_common_release(sk);
-}
-
-static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
-{
-       struct ieee802154_addr addr;
-       struct sockaddr_ieee802154 *uaddr = (struct sockaddr_ieee802154 *)_uaddr;
-       int err = 0;
-       struct net_device *dev = NULL;
-
-       if (len < sizeof(*uaddr))
-               return -EINVAL;
-
-       uaddr = (struct sockaddr_ieee802154 *)_uaddr;
-       if (uaddr->family != AF_IEEE802154)
-               return -EINVAL;
-
-       lock_sock(sk);
-
-       ieee802154_addr_from_sa(&addr, &uaddr->addr);
-       dev = ieee802154_get_dev(sock_net(sk), &addr);
-       if (!dev) {
-               err = -ENODEV;
-               goto out;
-       }
-
-       if (dev->type != ARPHRD_IEEE802154) {
-               err = -ENODEV;
-               goto out_put;
-       }
-
-       sk->sk_bound_dev_if = dev->ifindex;
-       sk_dst_reset(sk);
-
-out_put:
-       dev_put(dev);
-out:
-       release_sock(sk);
-
-       return err;
-}
-
-static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
-                      int addr_len)
-{
-       return -ENOTSUPP;
-}
-
-static int raw_disconnect(struct sock *sk, int flags)
-{
-       return 0;
-}
-
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
-                      struct msghdr *msg, size_t size)
-{
-       struct net_device *dev;
-       unsigned int mtu;
-       struct sk_buff *skb;
-       int hlen, tlen;
-       int err;
-
-       if (msg->msg_flags & MSG_OOB) {
-               pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
-               return -EOPNOTSUPP;
-       }
-
-       lock_sock(sk);
-       if (!sk->sk_bound_dev_if)
-               dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
-       else
-               dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
-       release_sock(sk);
-
-       if (!dev) {
-               pr_debug("no dev\n");
-               err = -ENXIO;
-               goto out;
-       }
-
-       mtu = dev->mtu;
-       pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
-
-       if (size > mtu) {
-               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-               err = -EINVAL;
-               goto out_dev;
-       }
-
-       hlen = LL_RESERVED_SPACE(dev);
-       tlen = dev->needed_tailroom;
-       skb = sock_alloc_send_skb(sk, hlen + tlen + size,
-                                 msg->msg_flags & MSG_DONTWAIT, &err);
-       if (!skb)
-               goto out_dev;
-
-       skb_reserve(skb, hlen);
-
-       skb_reset_mac_header(skb);
-       skb_reset_network_header(skb);
-
-       err = memcpy_from_msg(skb_put(skb, size), msg, size);
-       if (err < 0)
-               goto out_skb;
-
-       skb->dev = dev;
-       skb->sk  = sk;
-       skb->protocol = htons(ETH_P_IEEE802154);
-
-       dev_put(dev);
-
-       err = dev_queue_xmit(skb);
-       if (err > 0)
-               err = net_xmit_errno(err);
-
-       return err ?: size;
-
-out_skb:
-       kfree_skb(skb);
-out_dev:
-       dev_put(dev);
-out:
-       return err;
-}
-
-static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t len, int noblock, int flags, int *addr_len)
-{
-       size_t copied = 0;
-       int err = -EOPNOTSUPP;
-       struct sk_buff *skb;
-
-       skb = skb_recv_datagram(sk, flags, noblock, &err);
-       if (!skb)
-               goto out;
-
-       copied = skb->len;
-       if (len < copied) {
-               msg->msg_flags |= MSG_TRUNC;
-               copied = len;
-       }
-
-       err = skb_copy_datagram_msg(skb, 0, msg, copied);
-       if (err)
-               goto done;
-
-       sock_recv_ts_and_drops(msg, sk, skb);
-
-       if (flags & MSG_TRUNC)
-               copied = skb->len;
-done:
-       skb_free_datagram(sk, skb);
-out:
-       if (err)
-               return err;
-       return copied;
-}
-
-static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
-{
-       skb = skb_share_check(skb, GFP_ATOMIC);
-       if (!skb)
-               return NET_RX_DROP;
-
-       if (sock_queue_rcv_skb(sk, skb) < 0) {
-               kfree_skb(skb);
-               return NET_RX_DROP;
-       }
-
-       return NET_RX_SUCCESS;
-}
-
-void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
-{
-       struct sock *sk;
-
-       read_lock(&raw_lock);
-       sk_for_each(sk, &raw_head) {
-               bh_lock_sock(sk);
-               if (!sk->sk_bound_dev_if ||
-                   sk->sk_bound_dev_if == dev->ifindex) {
-                       struct sk_buff *clone;
-
-                       clone = skb_clone(skb, GFP_ATOMIC);
-                       if (clone)
-                               raw_rcv_skb(sk, clone);
-               }
-               bh_unlock_sock(sk);
-       }
-       read_unlock(&raw_lock);
-}
-
-static int raw_getsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int __user *optlen)
-{
-       return -EOPNOTSUPP;
-}
-
-static int raw_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen)
-{
-       return -EOPNOTSUPP;
-}
-
-struct proto ieee802154_raw_prot = {
-       .name           = "IEEE-802.15.4-RAW",
-       .owner          = THIS_MODULE,
-       .obj_size       = sizeof(struct sock),
-       .close          = raw_close,
-       .bind           = raw_bind,
-       .sendmsg        = raw_sendmsg,
-       .recvmsg        = raw_recvmsg,
-       .hash           = raw_hash,
-       .unhash         = raw_unhash,
-       .connect        = raw_connect,
-       .disconnect     = raw_disconnect,
-       .getsockopt     = raw_getsockopt,
-       .setsockopt     = raw_setsockopt,
-};
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
deleted file mode 100644 (file)
index 9d980ed..0000000
+++ /dev/null
@@ -1,585 +0,0 @@
-/*     6LoWPAN fragment reassembly
- *
- *
- *     Authors:
- *     Alexander Aring         <aar@pengutronix.de>
- *
- *     Based on: net/ipv6/reassembly.c
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-
-#define pr_fmt(fmt) "6LoWPAN: " fmt
-
-#include <linux/net.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/random.h>
-#include <linux/jhash.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include <net/ieee802154_netdev.h>
-#include <net/6lowpan.h>
-#include <net/ipv6.h>
-#include <net/inet_frag.h>
-
-#include "reassembly.h"
-
-static const char lowpan_frags_cache_name[] = "lowpan-frags";
-
-struct lowpan_frag_info {
-       u16 d_tag;
-       u16 d_size;
-       u8 d_offset;
-};
-
-static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
-{
-       return (struct lowpan_frag_info *)skb->cb;
-}
-
-static struct inet_frags lowpan_frags;
-
-static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
-                            struct sk_buff *prev, struct net_device *dev);
-
-static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
-                                    const struct ieee802154_addr *saddr,
-                                    const struct ieee802154_addr *daddr)
-{
-       net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
-       return jhash_3words(ieee802154_addr_hash(saddr),
-                           ieee802154_addr_hash(daddr),
-                           (__force u32)(tag + (d_size << 16)),
-                           lowpan_frags.rnd);
-}
-
-static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
-{
-       const struct lowpan_frag_queue *fq;
-
-       fq = container_of(q, struct lowpan_frag_queue, q);
-       return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
-}
-
-static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
-{
-       const struct lowpan_frag_queue *fq;
-       const struct lowpan_create_arg *arg = a;
-
-       fq = container_of(q, struct lowpan_frag_queue, q);
-       return  fq->tag == arg->tag && fq->d_size == arg->d_size &&
-               ieee802154_addr_equal(&fq->saddr, arg->src) &&
-               ieee802154_addr_equal(&fq->daddr, arg->dst);
-}
-
-static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
-{
-       const struct lowpan_create_arg *arg = a;
-       struct lowpan_frag_queue *fq;
-
-       fq = container_of(q, struct lowpan_frag_queue, q);
-
-       fq->tag = arg->tag;
-       fq->d_size = arg->d_size;
-       fq->saddr = *arg->src;
-       fq->daddr = *arg->dst;
-}
-
-static void lowpan_frag_expire(unsigned long data)
-{
-       struct frag_queue *fq;
-       struct net *net;
-
-       fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
-       net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
-
-       spin_lock(&fq->q.lock);
-
-       if (fq->q.flags & INET_FRAG_COMPLETE)
-               goto out;
-
-       inet_frag_kill(&fq->q, &lowpan_frags);
-out:
-       spin_unlock(&fq->q.lock);
-       inet_frag_put(&fq->q, &lowpan_frags);
-}
-
-static inline struct lowpan_frag_queue *
-fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
-       const struct ieee802154_addr *src,
-       const struct ieee802154_addr *dst)
-{
-       struct inet_frag_queue *q;
-       struct lowpan_create_arg arg;
-       unsigned int hash;
-       struct netns_ieee802154_lowpan *ieee802154_lowpan =
-               net_ieee802154_lowpan(net);
-
-       arg.tag = frag_info->d_tag;
-       arg.d_size = frag_info->d_size;
-       arg.src = src;
-       arg.dst = dst;
-
-       hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
-
-       q = inet_frag_find(&ieee802154_lowpan->frags,
-                          &lowpan_frags, &arg, hash);
-       if (IS_ERR_OR_NULL(q)) {
-               inet_frag_maybe_warn_overflow(q, pr_fmt());
-               return NULL;
-       }
-       return container_of(q, struct lowpan_frag_queue, q);
-}
-
-static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
-                            struct sk_buff *skb, const u8 frag_type)
-{
-       struct sk_buff *prev, *next;
-       struct net_device *dev;
-       int end, offset;
-
-       if (fq->q.flags & INET_FRAG_COMPLETE)
-               goto err;
-
-       offset = lowpan_cb(skb)->d_offset << 3;
-       end = lowpan_cb(skb)->d_size;
-
-       /* Is this the final fragment? */
-       if (offset + skb->len == end) {
-               /* If we already have some bits beyond end
-                * or have different end, the segment is corrupted.
-                */
-               if (end < fq->q.len ||
-                   ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
-                       goto err;
-               fq->q.flags |= INET_FRAG_LAST_IN;
-               fq->q.len = end;
-       } else {
-               if (end > fq->q.len) {
-                       /* Some bits beyond end -> corruption. */
-                       if (fq->q.flags & INET_FRAG_LAST_IN)
-                               goto err;
-                       fq->q.len = end;
-               }
-       }
-
-       /* Find out which fragments are in front and at the back of us
-        * in the chain of fragments so far.  We must know where to put
-        * this fragment, right?
-        */
-       prev = fq->q.fragments_tail;
-       if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
-               next = NULL;
-               goto found;
-       }
-       prev = NULL;
-       for (next = fq->q.fragments; next != NULL; next = next->next) {
-               if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
-                       break;  /* bingo! */
-               prev = next;
-       }
-
-found:
-       /* Insert this fragment in the chain of fragments. */
-       skb->next = next;
-       if (!next)
-               fq->q.fragments_tail = skb;
-       if (prev)
-               prev->next = skb;
-       else
-               fq->q.fragments = skb;
-
-       dev = skb->dev;
-       if (dev)
-               skb->dev = NULL;
-
-       fq->q.stamp = skb->tstamp;
-       if (frag_type == LOWPAN_DISPATCH_FRAG1) {
-               /* Calculate uncomp. 6lowpan header to estimate full size */
-               fq->q.meat += lowpan_uncompress_size(skb, NULL);
-               fq->q.flags |= INET_FRAG_FIRST_IN;
-       } else {
-               fq->q.meat += skb->len;
-       }
-       add_frag_mem_limit(&fq->q, skb->truesize);
-
-       if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           fq->q.meat == fq->q.len) {
-               int res;
-               unsigned long orefdst = skb->_skb_refdst;
-
-               skb->_skb_refdst = 0UL;
-               res = lowpan_frag_reasm(fq, prev, dev);
-               skb->_skb_refdst = orefdst;
-               return res;
-       }
-
-       return -1;
-err:
-       kfree_skb(skb);
-       return -1;
-}
-
-/*     Check if this packet is complete.
- *     Returns NULL on failure by any reason, and pointer
- *     to current nexthdr field in reassembled frame.
- *
- *     It is called with locked fq, and caller must check that
- *     queue is eligible for reassembly i.e. it is not COMPLETE,
- *     the last and the first frames arrived and all the bits are here.
- */
-static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
-                            struct net_device *dev)
-{
-       struct sk_buff *fp, *head = fq->q.fragments;
-       int sum_truesize;
-
-       inet_frag_kill(&fq->q, &lowpan_frags);
-
-       /* Make the one we just received the head. */
-       if (prev) {
-               head = prev->next;
-               fp = skb_clone(head, GFP_ATOMIC);
-
-               if (!fp)
-                       goto out_oom;
-
-               fp->next = head->next;
-               if (!fp->next)
-                       fq->q.fragments_tail = fp;
-               prev->next = fp;
-
-               skb_morph(head, fq->q.fragments);
-               head->next = fq->q.fragments->next;
-
-               consume_skb(fq->q.fragments);
-               fq->q.fragments = head;
-       }
-
-       /* Head of list must not be cloned. */
-       if (skb_unclone(head, GFP_ATOMIC))
-               goto out_oom;
-
-       /* If the first fragment is fragmented itself, we split
-        * it to two chunks: the first with data and paged part
-        * and the second, holding only fragments.
-        */
-       if (skb_has_frag_list(head)) {
-               struct sk_buff *clone;
-               int i, plen = 0;
-
-               clone = alloc_skb(0, GFP_ATOMIC);
-               if (!clone)
-                       goto out_oom;
-               clone->next = head->next;
-               head->next = clone;
-               skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-               skb_frag_list_init(head);
-               for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-                       plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-               clone->len = head->data_len - plen;
-               clone->data_len = clone->len;
-               head->data_len -= clone->len;
-               head->len -= clone->len;
-               add_frag_mem_limit(&fq->q, clone->truesize);
-       }
-
-       WARN_ON(head == NULL);
-
-       sum_truesize = head->truesize;
-       for (fp = head->next; fp;) {
-               bool headstolen;
-               int delta;
-               struct sk_buff *next = fp->next;
-
-               sum_truesize += fp->truesize;
-               if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
-                       kfree_skb_partial(fp, headstolen);
-               } else {
-                       if (!skb_shinfo(head)->frag_list)
-                               skb_shinfo(head)->frag_list = fp;
-                       head->data_len += fp->len;
-                       head->len += fp->len;
-                       head->truesize += fp->truesize;
-               }
-               fp = next;
-       }
-       sub_frag_mem_limit(&fq->q, sum_truesize);
-
-       head->next = NULL;
-       head->dev = dev;
-       head->tstamp = fq->q.stamp;
-
-       fq->q.fragments = NULL;
-       fq->q.fragments_tail = NULL;
-
-       return 1;
-out_oom:
-       net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
-       return -1;
-}
-
-static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
-                               struct lowpan_frag_info *frag_info)
-{
-       bool fail;
-       u8 pattern = 0, low = 0;
-       __be16 d_tag = 0;
-
-       fail = lowpan_fetch_skb(skb, &pattern, 1);
-       fail |= lowpan_fetch_skb(skb, &low, 1);
-       frag_info->d_size = (pattern & 7) << 8 | low;
-       fail |= lowpan_fetch_skb(skb, &d_tag, 2);
-       frag_info->d_tag = ntohs(d_tag);
-
-       if (frag_type == LOWPAN_DISPATCH_FRAGN) {
-               fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
-       } else {
-               skb_reset_network_header(skb);
-               frag_info->d_offset = 0;
-       }
-
-       if (unlikely(fail))
-               return -EIO;
-
-       return 0;
-}
-
-int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
-{
-       struct lowpan_frag_queue *fq;
-       struct net *net = dev_net(skb->dev);
-       struct lowpan_frag_info *frag_info = lowpan_cb(skb);
-       struct ieee802154_addr source, dest;
-       int err;
-
-       source = mac_cb(skb)->source;
-       dest = mac_cb(skb)->dest;
-
-       err = lowpan_get_frag_info(skb, frag_type, frag_info);
-       if (err < 0)
-               goto err;
-
-       if (frag_info->d_size > IPV6_MIN_MTU) {
-               net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
-               goto err;
-       }
-
-       fq = fq_find(net, frag_info, &source, &dest);
-       if (fq != NULL) {
-               int ret;
-
-               spin_lock(&fq->q.lock);
-               ret = lowpan_frag_queue(fq, skb, frag_type);
-               spin_unlock(&fq->q.lock);
-
-               inet_frag_put(&fq->q, &lowpan_frags);
-               return ret;
-       }
-
-err:
-       kfree_skb(skb);
-       return -1;
-}
-EXPORT_SYMBOL(lowpan_frag_rcv);
-
-#ifdef CONFIG_SYSCTL
-static int zero;
-
-static struct ctl_table lowpan_frags_ns_ctl_table[] = {
-       {
-               .procname       = "6lowpanfrag_high_thresh",
-               .data           = &init_net.ieee802154_lowpan.frags.high_thresh,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &init_net.ieee802154_lowpan.frags.low_thresh
-       },
-       {
-               .procname       = "6lowpanfrag_low_thresh",
-               .data           = &init_net.ieee802154_lowpan.frags.low_thresh,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &zero,
-               .extra2         = &init_net.ieee802154_lowpan.frags.high_thresh
-       },
-       {
-               .procname       = "6lowpanfrag_time",
-               .data           = &init_net.ieee802154_lowpan.frags.timeout,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       { }
-};
-
-/* secret interval has been deprecated */
-static int lowpan_frags_secret_interval_unused;
-static struct ctl_table lowpan_frags_ctl_table[] = {
-       {
-               .procname       = "6lowpanfrag_secret_interval",
-               .data           = &lowpan_frags_secret_interval_unused,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       { }
-};
-
-static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
-{
-       struct ctl_table *table;
-       struct ctl_table_header *hdr;
-       struct netns_ieee802154_lowpan *ieee802154_lowpan =
-               net_ieee802154_lowpan(net);
-
-       table = lowpan_frags_ns_ctl_table;
-       if (!net_eq(net, &init_net)) {
-               table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
-                               GFP_KERNEL);
-               if (table == NULL)
-                       goto err_alloc;
-
-               table[0].data = &ieee802154_lowpan->frags.high_thresh;
-               table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
-               table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
-               table[1].data = &ieee802154_lowpan->frags.low_thresh;
-               table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
-               table[2].data = &ieee802154_lowpan->frags.timeout;
-
-               /* Don't export sysctls to unprivileged users */
-               if (net->user_ns != &init_user_ns)
-                       table[0].procname = NULL;
-       }
-
-       hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
-       if (hdr == NULL)
-               goto err_reg;
-
-       ieee802154_lowpan->sysctl.frags_hdr = hdr;
-       return 0;
-
-err_reg:
-       if (!net_eq(net, &init_net))
-               kfree(table);
-err_alloc:
-       return -ENOMEM;
-}
-
-static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
-{
-       struct ctl_table *table;
-       struct netns_ieee802154_lowpan *ieee802154_lowpan =
-               net_ieee802154_lowpan(net);
-
-       table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
-       unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
-       if (!net_eq(net, &init_net))
-               kfree(table);
-}
-
-static struct ctl_table_header *lowpan_ctl_header;
-
-static int __init lowpan_frags_sysctl_register(void)
-{
-       lowpan_ctl_header = register_net_sysctl(&init_net,
-                                               "net/ieee802154/6lowpan",
-                                               lowpan_frags_ctl_table);
-       return lowpan_ctl_header == NULL ? -ENOMEM : 0;
-}
-
-static void lowpan_frags_sysctl_unregister(void)
-{
-       unregister_net_sysctl_table(lowpan_ctl_header);
-}
-#else
-static inline int lowpan_frags_ns_sysctl_register(struct net *net)
-{
-       return 0;
-}
-
-static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
-{
-}
-
-static inline int __init lowpan_frags_sysctl_register(void)
-{
-       return 0;
-}
-
-static inline void lowpan_frags_sysctl_unregister(void)
-{
-}
-#endif
-
-static int __net_init lowpan_frags_init_net(struct net *net)
-{
-       struct netns_ieee802154_lowpan *ieee802154_lowpan =
-               net_ieee802154_lowpan(net);
-
-       ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
-       ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
-       ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
-
-       inet_frags_init_net(&ieee802154_lowpan->frags);
-
-       return lowpan_frags_ns_sysctl_register(net);
-}
-
-static void __net_exit lowpan_frags_exit_net(struct net *net)
-{
-       struct netns_ieee802154_lowpan *ieee802154_lowpan =
-               net_ieee802154_lowpan(net);
-
-       lowpan_frags_ns_sysctl_unregister(net);
-       inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
-}
-
-static struct pernet_operations lowpan_frags_ops = {
-       .init = lowpan_frags_init_net,
-       .exit = lowpan_frags_exit_net,
-};
-
-int __init lowpan_net_frag_init(void)
-{
-       int ret;
-
-       ret = lowpan_frags_sysctl_register();
-       if (ret)
-               return ret;
-
-       ret = register_pernet_subsys(&lowpan_frags_ops);
-       if (ret)
-               goto err_pernet;
-
-       lowpan_frags.hashfn = lowpan_hashfn;
-       lowpan_frags.constructor = lowpan_frag_init;
-       lowpan_frags.destructor = NULL;
-       lowpan_frags.skb_free = NULL;
-       lowpan_frags.qsize = sizeof(struct frag_queue);
-       lowpan_frags.match = lowpan_frag_match;
-       lowpan_frags.frag_expire = lowpan_frag_expire;
-       lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
-       ret = inet_frags_init(&lowpan_frags);
-       if (ret)
-               goto err_pernet;
-
-       return ret;
-err_pernet:
-       lowpan_frags_sysctl_unregister();
-       return ret;
-}
-
-void lowpan_net_frag_exit(void)
-{
-       inet_frags_fini(&lowpan_frags);
-       lowpan_frags_sysctl_unregister();
-       unregister_pernet_subsys(&lowpan_frags_ops);
-}
diff --git a/net/ieee802154/reassembly.h b/net/ieee802154/reassembly.h
deleted file mode 100644 (file)
index 836b16f..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __IEEE802154_6LOWPAN_REASSEMBLY_H__
-#define __IEEE802154_6LOWPAN_REASSEMBLY_H__
-
-#include <net/inet_frag.h>
-
-struct lowpan_create_arg {
-       u16 tag;
-       u16 d_size;
-       const struct ieee802154_addr *src;
-       const struct ieee802154_addr *dst;
-};
-
-/* Equivalent of ipv4 struct ip
- */
-struct lowpan_frag_queue {
-       struct inet_frag_queue  q;
-
-       u16                     tag;
-       u16                     d_size;
-       struct ieee802154_addr  saddr;
-       struct ieee802154_addr  daddr;
-};
-
-static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
-{
-       switch (a->mode) {
-       case IEEE802154_ADDR_LONG:
-               return (((__force u64)a->extended_addr) >> 32) ^
-                       (((__force u64)a->extended_addr) & 0xffffffff);
-       case IEEE802154_ADDR_SHORT:
-               return (__force u32)(a->short_addr);
-       default:
-               return 0;
-       }
-}
-
-int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
-void lowpan_net_frag_exit(void);
-int lowpan_net_frag_init(void);
-
-#endif /* __IEEE802154_6LOWPAN_REASSEMBLY_H__ */
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
new file mode 100644 (file)
index 0000000..2878d8c
--- /dev/null
@@ -0,0 +1,1125 @@
+/*
+ * IEEE802154.4 socket interface
+ *
+ * Copyright 2007, 2008 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ */
+
+#include <linux/net.h>
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/if.h>
+#include <linux/termios.h>     /* For TIOCOUTQ/INQ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/datalink.h>
+#include <net/psnap.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/route.h>
+
+#include <net/af_ieee802154.h>
+#include <net/ieee802154_netdev.h>
+
+/* Utility function for families */
+static struct net_device*
+ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
+{
+       struct net_device *dev = NULL;
+       struct net_device *tmp;
+       __le16 pan_id, short_addr;
+       u8 hwaddr[IEEE802154_ADDR_LEN];
+
+       switch (addr->mode) {
+       case IEEE802154_ADDR_LONG:
+               ieee802154_devaddr_to_raw(hwaddr, addr->extended_addr);
+               rcu_read_lock();
+               dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, hwaddr);
+               if (dev)
+                       dev_hold(dev);
+               rcu_read_unlock();
+               break;
+       case IEEE802154_ADDR_SHORT:
+               if (addr->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST) ||
+                   addr->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
+                   addr->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST))
+                       break;
+
+               rtnl_lock();
+
+               for_each_netdev(net, tmp) {
+                       if (tmp->type != ARPHRD_IEEE802154)
+                               continue;
+
+                       pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
+                       short_addr =
+                               ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
+
+                       if (pan_id == addr->pan_id &&
+                           short_addr == addr->short_addr) {
+                               dev = tmp;
+                               dev_hold(dev);
+                               break;
+                       }
+               }
+
+               rtnl_unlock();
+               break;
+       default:
+               pr_warn("Unsupported ieee802154 address type: %d\n",
+                       addr->mode);
+               break;
+       }
+
+       return dev;
+}
+
+static int ieee802154_sock_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+
+       if (sk) {
+               sock->sk = NULL;
+               sk->sk_prot->close(sk, 0);
+       }
+       return 0;
+}
+
+static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+                                  struct msghdr *msg, size_t len)
+{
+       struct sock *sk = sock->sk;
+
+       return sk->sk_prot->sendmsg(iocb, sk, msg, len);
+}
+
+static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
+                               int addr_len)
+{
+       struct sock *sk = sock->sk;
+
+       if (sk->sk_prot->bind)
+               return sk->sk_prot->bind(sk, uaddr, addr_len);
+
+       return sock_no_bind(sock, uaddr, addr_len);
+}
+
+static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
+                                  int addr_len, int flags)
+{
+       struct sock *sk = sock->sk;
+
+       if (addr_len < sizeof(uaddr->sa_family))
+               return -EINVAL;
+
+       if (uaddr->sa_family == AF_UNSPEC)
+               return sk->sk_prot->disconnect(sk, flags);
+
+       return sk->sk_prot->connect(sk, uaddr, addr_len);
+}
+
+static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
+                               unsigned int cmd)
+{
+       struct ifreq ifr;
+       int ret = -ENOIOCTLCMD;
+       struct net_device *dev;
+
+       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+               return -EFAULT;
+
+       ifr.ifr_name[IFNAMSIZ-1] = 0;
+
+       dev_load(sock_net(sk), ifr.ifr_name);
+       dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
+
+       if (!dev)
+               return -ENODEV;
+
+       if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
+               ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
+
+       if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
+               ret = -EFAULT;
+       dev_put(dev);
+
+       return ret;
+}
+
+static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
+                                unsigned long arg)
+{
+       struct sock *sk = sock->sk;
+
+       switch (cmd) {
+       case SIOCGSTAMP:
+               return sock_get_timestamp(sk, (struct timeval __user *)arg);
+       case SIOCGSTAMPNS:
+               return sock_get_timestampns(sk, (struct timespec __user *)arg);
+       case SIOCGIFADDR:
+       case SIOCSIFADDR:
+               return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg,
+                               cmd);
+       default:
+               if (!sk->sk_prot->ioctl)
+                       return -ENOIOCTLCMD;
+               return sk->sk_prot->ioctl(sk, cmd, arg);
+       }
+}
+
+/* RAW Sockets (802.15.4 created in userspace) */
+static HLIST_HEAD(raw_head);
+static DEFINE_RWLOCK(raw_lock);
+
+static void raw_hash(struct sock *sk)
+{
+       write_lock_bh(&raw_lock);
+       sk_add_node(sk, &raw_head);
+       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+       write_unlock_bh(&raw_lock);
+}
+
+static void raw_unhash(struct sock *sk)
+{
+       write_lock_bh(&raw_lock);
+       if (sk_del_node_init(sk))
+               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+       write_unlock_bh(&raw_lock);
+}
+
+static void raw_close(struct sock *sk, long timeout)
+{
+       sk_common_release(sk);
+}
+
+static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
+{
+       struct ieee802154_addr addr;
+       struct sockaddr_ieee802154 *uaddr = (struct sockaddr_ieee802154 *)_uaddr;
+       int err = 0;
+       struct net_device *dev = NULL;
+
+       if (len < sizeof(*uaddr))
+               return -EINVAL;
+
+       uaddr = (struct sockaddr_ieee802154 *)_uaddr;
+       if (uaddr->family != AF_IEEE802154)
+               return -EINVAL;
+
+       lock_sock(sk);
+
+       ieee802154_addr_from_sa(&addr, &uaddr->addr);
+       dev = ieee802154_get_dev(sock_net(sk), &addr);
+       if (!dev) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       if (dev->type != ARPHRD_IEEE802154) {
+               err = -ENODEV;
+               goto out_put;
+       }
+
+       sk->sk_bound_dev_if = dev->ifindex;
+       sk_dst_reset(sk);
+
+out_put:
+       dev_put(dev);
+out:
+       release_sock(sk);
+
+       return err;
+}
+
+static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
+                      int addr_len)
+{
+       return -ENOTSUPP;
+}
+
+static int raw_disconnect(struct sock *sk, int flags)
+{
+       return 0;
+}
+
+static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
+                      struct msghdr *msg, size_t size)
+{
+       struct net_device *dev;
+       unsigned int mtu;
+       struct sk_buff *skb;
+       int hlen, tlen;
+       int err;
+
+       if (msg->msg_flags & MSG_OOB) {
+               pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
+               return -EOPNOTSUPP;
+       }
+
+       lock_sock(sk);
+       if (!sk->sk_bound_dev_if)
+               dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
+       else
+               dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
+       release_sock(sk);
+
+       if (!dev) {
+               pr_debug("no dev\n");
+               err = -ENXIO;
+               goto out;
+       }
+
+       mtu = dev->mtu;
+       pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
+
+       if (size > mtu) {
+               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+               err = -EINVAL;
+               goto out_dev;
+       }
+
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = sock_alloc_send_skb(sk, hlen + tlen + size,
+                                 msg->msg_flags & MSG_DONTWAIT, &err);
+       if (!skb)
+               goto out_dev;
+
+       skb_reserve(skb, hlen);
+
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+
+       err = memcpy_from_msg(skb_put(skb, size), msg, size);
+       if (err < 0)
+               goto out_skb;
+
+       skb->dev = dev;
+       skb->sk  = sk;
+       skb->protocol = htons(ETH_P_IEEE802154);
+
+       dev_put(dev);
+
+       err = dev_queue_xmit(skb);
+       if (err > 0)
+               err = net_xmit_errno(err);
+
+       return err ?: size;
+
+out_skb:
+       kfree_skb(skb);
+out_dev:
+       dev_put(dev);
+out:
+       return err;
+}
+
+static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                      size_t len, int noblock, int flags, int *addr_len)
+{
+       size_t copied = 0;
+       int err = -EOPNOTSUPP;
+       struct sk_buff *skb;
+
+       skb = skb_recv_datagram(sk, flags, noblock, &err);
+       if (!skb)
+               goto out;
+
+       copied = skb->len;
+       if (len < copied) {
+               msg->msg_flags |= MSG_TRUNC;
+               copied = len;
+       }
+
+       err = skb_copy_datagram_msg(skb, 0, msg, copied);
+       if (err)
+               goto done;
+
+       sock_recv_ts_and_drops(msg, sk, skb);
+
+       if (flags & MSG_TRUNC)
+               copied = skb->len;
+done:
+       skb_free_datagram(sk, skb);
+out:
+       if (err)
+               return err;
+       return copied;
+}
+
+static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               return NET_RX_DROP;
+
+       if (sock_queue_rcv_skb(sk, skb) < 0) {
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+
+       return NET_RX_SUCCESS;
+}
+
+static void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
+{
+       struct sock *sk;
+
+       read_lock(&raw_lock);
+       sk_for_each(sk, &raw_head) {
+               bh_lock_sock(sk);
+               if (!sk->sk_bound_dev_if ||
+                   sk->sk_bound_dev_if == dev->ifindex) {
+                       struct sk_buff *clone;
+
+                       clone = skb_clone(skb, GFP_ATOMIC);
+                       if (clone)
+                               raw_rcv_skb(sk, clone);
+               }
+               bh_unlock_sock(sk);
+       }
+       read_unlock(&raw_lock);
+}
+
+static int raw_getsockopt(struct sock *sk, int level, int optname,
+                         char __user *optval, int __user *optlen)
+{
+       return -EOPNOTSUPP;
+}
+
+static int raw_setsockopt(struct sock *sk, int level, int optname,
+                         char __user *optval, unsigned int optlen)
+{
+       return -EOPNOTSUPP;
+}
+
+static struct proto ieee802154_raw_prot = {
+       .name           = "IEEE-802.15.4-RAW",
+       .owner          = THIS_MODULE,
+       .obj_size       = sizeof(struct sock),
+       .close          = raw_close,
+       .bind           = raw_bind,
+       .sendmsg        = raw_sendmsg,
+       .recvmsg        = raw_recvmsg,
+       .hash           = raw_hash,
+       .unhash         = raw_unhash,
+       .connect        = raw_connect,
+       .disconnect     = raw_disconnect,
+       .getsockopt     = raw_getsockopt,
+       .setsockopt     = raw_setsockopt,
+};
+
+static const struct proto_ops ieee802154_raw_ops = {
+       .family            = PF_IEEE802154,
+       .owner             = THIS_MODULE,
+       .release           = ieee802154_sock_release,
+       .bind              = ieee802154_sock_bind,
+       .connect           = ieee802154_sock_connect,
+       .socketpair        = sock_no_socketpair,
+       .accept            = sock_no_accept,
+       .getname           = sock_no_getname,
+       .poll              = datagram_poll,
+       .ioctl             = ieee802154_sock_ioctl,
+       .listen            = sock_no_listen,
+       .shutdown          = sock_no_shutdown,
+       .setsockopt        = sock_common_setsockopt,
+       .getsockopt        = sock_common_getsockopt,
+       .sendmsg           = ieee802154_sock_sendmsg,
+       .recvmsg           = sock_common_recvmsg,
+       .mmap              = sock_no_mmap,
+       .sendpage          = sock_no_sendpage,
+#ifdef CONFIG_COMPAT
+       .compat_setsockopt = compat_sock_common_setsockopt,
+       .compat_getsockopt = compat_sock_common_getsockopt,
+#endif
+};
+
+/* DGRAM Sockets (802.15.4 dataframes) */
+static HLIST_HEAD(dgram_head);
+static DEFINE_RWLOCK(dgram_lock);
+
+struct dgram_sock {
+       struct sock sk;
+
+       struct ieee802154_addr src_addr;
+       struct ieee802154_addr dst_addr;
+
+       unsigned int bound:1;
+       unsigned int connected:1;
+       unsigned int want_ack:1;
+       unsigned int secen:1;
+       unsigned int secen_override:1;
+       unsigned int seclevel:3;
+       unsigned int seclevel_override:1;
+};
+
+static inline struct dgram_sock *dgram_sk(const struct sock *sk)
+{
+       return container_of(sk, struct dgram_sock, sk);
+}
+
+static void dgram_hash(struct sock *sk)
+{
+       write_lock_bh(&dgram_lock);
+       sk_add_node(sk, &dgram_head);
+       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+       write_unlock_bh(&dgram_lock);
+}
+
+static void dgram_unhash(struct sock *sk)
+{
+       write_lock_bh(&dgram_lock);
+       if (sk_del_node_init(sk))
+               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+       write_unlock_bh(&dgram_lock);
+}
+
+static int dgram_init(struct sock *sk)
+{
+       struct dgram_sock *ro = dgram_sk(sk);
+
+       ro->want_ack = 1;
+       return 0;
+}
+
+static void dgram_close(struct sock *sk, long timeout)
+{
+       sk_common_release(sk);
+}
+
+static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
+{
+       struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
+       struct ieee802154_addr haddr;
+       struct dgram_sock *ro = dgram_sk(sk);
+       int err = -EINVAL;
+       struct net_device *dev;
+
+       lock_sock(sk);
+
+       ro->bound = 0;
+
+       if (len < sizeof(*addr))
+               goto out;
+
+       if (addr->family != AF_IEEE802154)
+               goto out;
+
+       ieee802154_addr_from_sa(&haddr, &addr->addr);
+       dev = ieee802154_get_dev(sock_net(sk), &haddr);
+       if (!dev) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       if (dev->type != ARPHRD_IEEE802154) {
+               err = -ENODEV;
+               goto out_put;
+       }
+
+       ro->src_addr = haddr;
+
+       ro->bound = 1;
+       err = 0;
+out_put:
+       dev_put(dev);
+out:
+       release_sock(sk);
+
+       return err;
+}
+
+static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case SIOCOUTQ:
+       {
+               int amount = sk_wmem_alloc_get(sk);
+
+               return put_user(amount, (int __user *)arg);
+       }
+
+       case SIOCINQ:
+       {
+               struct sk_buff *skb;
+               unsigned long amount;
+
+               amount = 0;
+               spin_lock_bh(&sk->sk_receive_queue.lock);
+               skb = skb_peek(&sk->sk_receive_queue);
+               if (skb) {
+                       /* We will only return the amount
+                        * of this packet since that is all
+                        * that will be read.
+                        */
+                       amount = skb->len - ieee802154_hdr_length(skb);
+               }
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
+               return put_user(amount, (int __user *)arg);
+       }
+       }
+
+       return -ENOIOCTLCMD;
+}
+
+/* FIXME: autobind */
+static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
+                        int len)
+{
+       struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
+       struct dgram_sock *ro = dgram_sk(sk);
+       int err = 0;
+
+       if (len < sizeof(*addr))
+               return -EINVAL;
+
+       if (addr->family != AF_IEEE802154)
+               return -EINVAL;
+
+       lock_sock(sk);
+
+       if (!ro->bound) {
+               err = -ENETUNREACH;
+               goto out;
+       }
+
+       ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
+       ro->connected = 1;
+
+out:
+       release_sock(sk);
+       return err;
+}
+
+static int dgram_disconnect(struct sock *sk, int flags)
+{
+       struct dgram_sock *ro = dgram_sk(sk);
+
+       lock_sock(sk);
+       ro->connected = 0;
+       release_sock(sk);
+
+       return 0;
+}
+
+static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
+                        struct msghdr *msg, size_t size)
+{
+       struct net_device *dev;
+       unsigned int mtu;
+       struct sk_buff *skb;
+       struct ieee802154_mac_cb *cb;
+       struct dgram_sock *ro = dgram_sk(sk);
+       struct ieee802154_addr dst_addr;
+       int hlen, tlen;
+       int err;
+
+       if (msg->msg_flags & MSG_OOB) {
+               pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
+               return -EOPNOTSUPP;
+       }
+
+       if (!ro->connected && !msg->msg_name)
+               return -EDESTADDRREQ;
+       else if (ro->connected && msg->msg_name)
+               return -EISCONN;
+
+       if (!ro->bound)
+               dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
+       else
+               dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr);
+
+       if (!dev) {
+               pr_debug("no dev\n");
+               err = -ENXIO;
+               goto out;
+       }
+       mtu = dev->mtu;
+       pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
+
+       if (size > mtu) {
+               pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+               err = -EMSGSIZE;
+               goto out_dev;
+       }
+
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = sock_alloc_send_skb(sk, hlen + tlen + size,
+                                 msg->msg_flags & MSG_DONTWAIT,
+                                 &err);
+       if (!skb)
+               goto out_dev;
+
+       skb_reserve(skb, hlen);
+
+       skb_reset_network_header(skb);
+
+       cb = mac_cb_init(skb);
+       cb->type = IEEE802154_FC_TYPE_DATA;
+       cb->ackreq = ro->want_ack;
+
+       if (msg->msg_name) {
+               DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
+                                daddr, msg->msg_name);
+
+               ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
+       } else {
+               dst_addr = ro->dst_addr;
+       }
+
+       cb->secen = ro->secen;
+       cb->secen_override = ro->secen_override;
+       cb->seclevel = ro->seclevel;
+       cb->seclevel_override = ro->seclevel_override;
+
+       err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
+                             ro->bound ? &ro->src_addr : NULL, size);
+       if (err < 0)
+               goto out_skb;
+
+       err = memcpy_from_msg(skb_put(skb, size), msg, size);
+       if (err < 0)
+               goto out_skb;
+
+       skb->dev = dev;
+       skb->sk  = sk;
+       skb->protocol = htons(ETH_P_IEEE802154);
+
+       dev_put(dev);
+
+       err = dev_queue_xmit(skb);
+       if (err > 0)
+               err = net_xmit_errno(err);
+
+       return err ?: size;
+
+out_skb:
+       kfree_skb(skb);
+out_dev:
+       dev_put(dev);
+out:
+       return err;
+}
+
+static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
+                        struct msghdr *msg, size_t len, int noblock,
+                        int flags, int *addr_len)
+{
+       size_t copied = 0;
+       int err = -EOPNOTSUPP;
+       struct sk_buff *skb;
+       DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name);
+
+       skb = skb_recv_datagram(sk, flags, noblock, &err);
+       if (!skb)
+               goto out;
+
+       copied = skb->len;
+       if (len < copied) {
+               msg->msg_flags |= MSG_TRUNC;
+               copied = len;
+       }
+
+       /* FIXME: skip headers if necessary ?! */
+       err = skb_copy_datagram_msg(skb, 0, msg, copied);
+       if (err)
+               goto done;
+
+       sock_recv_ts_and_drops(msg, sk, skb);
+
+       if (saddr) {
+               saddr->family = AF_IEEE802154;
+               ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
+               *addr_len = sizeof(*saddr);
+       }
+
+       if (flags & MSG_TRUNC)
+               copied = skb->len;
+done:
+       skb_free_datagram(sk, skb);
+out:
+       if (err)
+               return err;
+       return copied;
+}
+
+static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               return NET_RX_DROP;
+
+       if (sock_queue_rcv_skb(sk, skb) < 0) {
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+
+       return NET_RX_SUCCESS;
+}
+
+static inline bool
+ieee802154_match_sock(__le64 hw_addr, __le16 pan_id, __le16 short_addr,
+                     struct dgram_sock *ro)
+{
+       if (!ro->bound)
+               return true;
+
+       if (ro->src_addr.mode == IEEE802154_ADDR_LONG &&
+           hw_addr == ro->src_addr.extended_addr)
+               return true;
+
+       if (ro->src_addr.mode == IEEE802154_ADDR_SHORT &&
+           pan_id == ro->src_addr.pan_id &&
+           short_addr == ro->src_addr.short_addr)
+               return true;
+
+       return false;
+}
+
+static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
+{
+       struct sock *sk, *prev = NULL;
+       int ret = NET_RX_SUCCESS;
+       __le16 pan_id, short_addr;
+       __le64 hw_addr;
+
+       /* Data frame processing */
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+       short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
+       hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
+
+       read_lock(&dgram_lock);
+       sk_for_each(sk, &dgram_head) {
+               if (ieee802154_match_sock(hw_addr, pan_id, short_addr,
+                                         dgram_sk(sk))) {
+                       if (prev) {
+                               struct sk_buff *clone;
+
+                               clone = skb_clone(skb, GFP_ATOMIC);
+                               if (clone)
+                                       dgram_rcv_skb(prev, clone);
+                       }
+
+                       prev = sk;
+               }
+       }
+
+       if (prev) {
+               dgram_rcv_skb(prev, skb);
+       } else {
+               kfree_skb(skb);
+               ret = NET_RX_DROP;
+       }
+       read_unlock(&dgram_lock);
+
+       return ret;
+}
+
+static int dgram_getsockopt(struct sock *sk, int level, int optname,
+                           char __user *optval, int __user *optlen)
+{
+       struct dgram_sock *ro = dgram_sk(sk);
+
+       int val, len;
+
+       if (level != SOL_IEEE802154)
+               return -EOPNOTSUPP;
+
+       if (get_user(len, optlen))
+               return -EFAULT;
+
+       len = min_t(unsigned int, len, sizeof(int));
+
+       switch (optname) {
+       case WPAN_WANTACK:
+               val = ro->want_ack;
+               break;
+       case WPAN_SECURITY:
+               if (!ro->secen_override)
+                       val = WPAN_SECURITY_DEFAULT;
+               else if (ro->secen)
+                       val = WPAN_SECURITY_ON;
+               else
+                       val = WPAN_SECURITY_OFF;
+               break;
+       case WPAN_SECURITY_LEVEL:
+               if (!ro->seclevel_override)
+                       val = WPAN_SECURITY_LEVEL_DEFAULT;
+               else
+                       val = ro->seclevel;
+               break;
+       default:
+               return -ENOPROTOOPT;
+       }
+
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &val, len))
+               return -EFAULT;
+       return 0;
+}
+
+static int dgram_setsockopt(struct sock *sk, int level, int optname,
+                           char __user *optval, unsigned int optlen)
+{
+       struct dgram_sock *ro = dgram_sk(sk);
+       struct net *net = sock_net(sk);
+       int val;
+       int err = 0;
+
+       if (optlen < sizeof(int))
+               return -EINVAL;
+
+       if (get_user(val, (int __user *)optval))
+               return -EFAULT;
+
+       lock_sock(sk);
+
+       switch (optname) {
+       case WPAN_WANTACK:
+               ro->want_ack = !!val;
+               break;
+       case WPAN_SECURITY:
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
+                   !ns_capable(net->user_ns, CAP_NET_RAW)) {
+                       err = -EPERM;
+                       break;
+               }
+
+               switch (val) {
+               case WPAN_SECURITY_DEFAULT:
+                       ro->secen_override = 0;
+                       break;
+               case WPAN_SECURITY_ON:
+                       ro->secen_override = 1;
+                       ro->secen = 1;
+                       break;
+               case WPAN_SECURITY_OFF:
+                       ro->secen_override = 1;
+                       ro->secen = 0;
+                       break;
+               default:
+                       err = -EINVAL;
+                       break;
+               }
+               break;
+       case WPAN_SECURITY_LEVEL:
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
+                   !ns_capable(net->user_ns, CAP_NET_RAW)) {
+                       err = -EPERM;
+                       break;
+               }
+
+               if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
+                   val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
+                       err = -EINVAL;
+               } else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
+                       ro->seclevel_override = 0;
+               } else {
+                       ro->seclevel_override = 1;
+                       ro->seclevel = val;
+               }
+               break;
+       default:
+               err = -ENOPROTOOPT;
+               break;
+       }
+
+       release_sock(sk);
+       return err;
+}
+
+static struct proto ieee802154_dgram_prot = {
+       .name           = "IEEE-802.15.4-MAC",
+       .owner          = THIS_MODULE,
+       .obj_size       = sizeof(struct dgram_sock),
+       .init           = dgram_init,
+       .close          = dgram_close,
+       .bind           = dgram_bind,
+       .sendmsg        = dgram_sendmsg,
+       .recvmsg        = dgram_recvmsg,
+       .hash           = dgram_hash,
+       .unhash         = dgram_unhash,
+       .connect        = dgram_connect,
+       .disconnect     = dgram_disconnect,
+       .ioctl          = dgram_ioctl,
+       .getsockopt     = dgram_getsockopt,
+       .setsockopt     = dgram_setsockopt,
+};
+
+static const struct proto_ops ieee802154_dgram_ops = {
+       .family            = PF_IEEE802154,
+       .owner             = THIS_MODULE,
+       .release           = ieee802154_sock_release,
+       .bind              = ieee802154_sock_bind,
+       .connect           = ieee802154_sock_connect,
+       .socketpair        = sock_no_socketpair,
+       .accept            = sock_no_accept,
+       .getname           = sock_no_getname,
+       .poll              = datagram_poll,
+       .ioctl             = ieee802154_sock_ioctl,
+       .listen            = sock_no_listen,
+       .shutdown          = sock_no_shutdown,
+       .setsockopt        = sock_common_setsockopt,
+       .getsockopt        = sock_common_getsockopt,
+       .sendmsg           = ieee802154_sock_sendmsg,
+       .recvmsg           = sock_common_recvmsg,
+       .mmap              = sock_no_mmap,
+       .sendpage          = sock_no_sendpage,
+#ifdef CONFIG_COMPAT
+       .compat_setsockopt = compat_sock_common_setsockopt,
+       .compat_getsockopt = compat_sock_common_getsockopt,
+#endif
+};
+
+/* Create a socket. Initialise the socket, blank the addresses
+ * set the state.
+ */
+static int ieee802154_create(struct net *net, struct socket *sock,
+                            int protocol, int kern)
+{
+       struct sock *sk;
+       int rc;
+       struct proto *proto;
+       const struct proto_ops *ops;
+
+       if (!net_eq(net, &init_net))
+               return -EAFNOSUPPORT;
+
+       switch (sock->type) {
+       case SOCK_RAW:
+               proto = &ieee802154_raw_prot;
+               ops = &ieee802154_raw_ops;
+               break;
+       case SOCK_DGRAM:
+               proto = &ieee802154_dgram_prot;
+               ops = &ieee802154_dgram_ops;
+               break;
+       default:
+               rc = -ESOCKTNOSUPPORT;
+               goto out;
+       }
+
+       rc = -ENOMEM;
+       sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
+       if (!sk)
+               goto out;
+       rc = 0;
+
+       sock->ops = ops;
+
+       sock_init_data(sock, sk);
+       /* FIXME: sk->sk_destruct */
+       sk->sk_family = PF_IEEE802154;
+
+       /* Checksums on by default */
+       sock_set_flag(sk, SOCK_ZAPPED);
+
+       if (sk->sk_prot->hash)
+               sk->sk_prot->hash(sk);
+
+       if (sk->sk_prot->init) {
+               rc = sk->sk_prot->init(sk);
+               if (rc)
+                       sk_common_release(sk);
+       }
+out:
+       return rc;
+}
+
+static const struct net_proto_family ieee802154_family_ops = {
+       .family         = PF_IEEE802154,
+       .create         = ieee802154_create,
+       .owner          = THIS_MODULE,
+};
+
+static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
+                         struct packet_type *pt, struct net_device *orig_dev)
+{
+       if (!netif_running(dev))
+               goto drop;
+       pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
+#ifdef DEBUG
+       print_hex_dump_bytes("ieee802154_rcv ",
+                            DUMP_PREFIX_NONE, skb->data, skb->len);
+#endif
+
+       if (!net_eq(dev_net(dev), &init_net))
+               goto drop;
+
+       ieee802154_raw_deliver(dev, skb);
+
+       if (dev->type != ARPHRD_IEEE802154)
+               goto drop;
+
+       if (skb->pkt_type != PACKET_OTHERHOST)
+               return ieee802154_dgram_deliver(dev, skb);
+
+drop:
+       kfree_skb(skb);
+       return NET_RX_DROP;
+}
+
+static struct packet_type ieee802154_packet_type = {
+       .type = htons(ETH_P_IEEE802154),
+       .func = ieee802154_rcv,
+};
+
+static int __init af_ieee802154_init(void)
+{
+       int rc = -EINVAL;
+
+       rc = proto_register(&ieee802154_raw_prot, 1);
+       if (rc)
+               goto out;
+
+       rc = proto_register(&ieee802154_dgram_prot, 1);
+       if (rc)
+               goto err_dgram;
+
+       /* Tell SOCKET that we are alive */
+       rc = sock_register(&ieee802154_family_ops);
+       if (rc)
+               goto err_sock;
+       dev_add_pack(&ieee802154_packet_type);
+
+       rc = 0;
+       goto out;
+
+err_sock:
+       proto_unregister(&ieee802154_dgram_prot);
+err_dgram:
+       proto_unregister(&ieee802154_raw_prot);
+out:
+       return rc;
+}
+
+static void __exit af_ieee802154_remove(void)
+{
+       dev_remove_pack(&ieee802154_packet_type);
+       sock_unregister(PF_IEEE802154);
+       proto_unregister(&ieee802154_dgram_prot);
+       proto_unregister(&ieee802154_raw_prot);
+}
+
+module_init(af_ieee802154_init);
+module_exit(af_ieee802154_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_IEEE802154);
index a44773c8346c13c24535448f7e33105c894ac279..d2e49baaff63420320486d310f5c8f7d0d54bcc2 100644 (file)
@@ -395,8 +395,6 @@ int inet_release(struct socket *sock)
        if (sk) {
                long timeout;
 
-               sock_rps_reset_flow(sk);
-
                /* Applications forget to leave groups before exiting */
                ip_mc_drop_socket(sk);
 
index 214882e7d6deea0114531124689027dfdde83df6..f0b4a31d7bd6d23b114600ad454fa79c25945fe7 100644 (file)
@@ -1522,7 +1522,8 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
                          preferred, valid))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -1566,7 +1567,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                                if (inet_fill_ifaddr(skb, ifa,
                                             NETLINK_CB(cb->skb).portid,
                                             cb->nlh->nlmsg_seq,
-                                            RTM_NEWADDR, NLM_F_MULTI) <= 0) {
+                                            RTM_NEWADDR, NLM_F_MULTI) < 0) {
                                        rcu_read_unlock();
                                        goto done;
                                }
@@ -1749,7 +1750,8 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
                        IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -1881,7 +1883,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
                                                      cb->nlh->nlmsg_seq,
                                                      RTM_NEWNETCONF,
                                                      NLM_F_MULTI,
-                                                     -1) <= 0) {
+                                                     -1) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
@@ -1897,7 +1899,7 @@ cont:
                                              NETLINK_CB(cb->skb).portid,
                                              cb->nlh->nlmsg_seq,
                                              RTM_NEWNETCONF, NLM_F_MULTI,
-                                             -1) <= 0)
+                                             -1) < 0)
                        goto done;
                else
                        h++;
@@ -1908,7 +1910,7 @@ cont:
                                              NETLINK_CB(cb->skb).portid,
                                              cb->nlh->nlmsg_seq,
                                              RTM_NEWNETCONF, NLM_F_MULTI,
-                                             -1) <= 0)
+                                             -1) < 0)
                        goto done;
                else
                        h++;
@@ -2320,7 +2322,7 @@ static __net_initdata struct pernet_operations devinet_ops = {
        .exit = devinet_exit_net,
 };
 
-static struct rtnl_af_ops inet_af_ops = {
+static struct rtnl_af_ops inet_af_ops __read_mostly = {
        .family           = AF_INET,
        .fill_link_af     = inet_fill_link_af,
        .get_link_af_size = inet_get_link_af_size,
index 1e4f6600b31d4929cce729dcb38a3227b0de0a48..825981b1049a6c17ec065c000452cdf98e41bacf 100644 (file)
@@ -32,7 +32,6 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
                  unsigned int);
 void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len,
               u32 tb_id, const struct nl_info *info, unsigned int nlm_flags);
-struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio);
 
 static inline void fib_result_assign(struct fib_result *res,
                                     struct fib_info *fi)
index f99f41bd15b83072d7a67cd452a29ce550e0593e..1e2090ea663e209b739e01186b3939802630d63f 100644 (file)
@@ -360,7 +360,8 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
                         + nla_total_size(4) /* RTA_TABLE */
                         + nla_total_size(4) /* RTA_DST */
                         + nla_total_size(4) /* RTA_PRIORITY */
-                        + nla_total_size(4); /* RTA_PREFSRC */
+                        + nla_total_size(4) /* RTA_PREFSRC */
+                        + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
 
        /* space for nested metrics */
        payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
@@ -410,24 +411,6 @@ errout:
                rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
 }
 
-/* Return the first fib alias matching TOS with
- * priority less than or equal to PRIO.
- */
-struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
-{
-       if (fah) {
-               struct fib_alias *fa;
-               list_for_each_entry(fa, fah, fa_list) {
-                       if (fa->fa_tos > tos)
-                               continue;
-                       if (fa->fa_info->fib_priority >= prio ||
-                           fa->fa_tos < tos)
-                               return fa;
-               }
-       }
-       return NULL;
-}
-
 static int fib_detect_death(struct fib_info *fi, int order,
                            struct fib_info **last_resort, int *last_idx,
                            int dflt)
@@ -859,7 +842,16 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 
                                if (type > RTAX_MAX)
                                        goto err_inval;
-                               val = nla_get_u32(nla);
+                               if (type == RTAX_CC_ALGO) {
+                                       char tmp[TCP_CA_NAME_MAX];
+
+                                       nla_strlcpy(tmp, nla, sizeof(tmp));
+                                       val = tcp_ca_get_key_by_name(tmp);
+                                       if (val == TCP_CA_UNSPEC)
+                                               goto err_inval;
+                               } else {
+                                       val = nla_get_u32(nla);
+                               }
                                if (type == RTAX_ADVMSS && val > 65535 - 40)
                                        val = 65535 - 40;
                                if (type == RTAX_MTU && val > 65535 - 15)
@@ -1081,7 +1073,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                nla_nest_end(skb, mp);
        }
 #endif
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
index 281e5e00025f1f63ade813db5e8e0d8b9ceb0bbf..3daf0224ff2e1821ce9c258a48db1c3d20437657 100644 (file)
@@ -83,7 +83,8 @@
 
 #define MAX_STAT_DEPTH 32
 
-#define KEYLENGTH (8*sizeof(t_key))
+#define KEYLENGTH      (8*sizeof(t_key))
+#define KEY_MAX                ((t_key)~0)
 
 typedef unsigned int t_key;
 
@@ -102,8 +103,8 @@ struct tnode {
        union {
                /* The fields in this struct are valid if bits > 0 (TNODE) */
                struct {
-                       unsigned int full_children;  /* KEYLENGTH bits needed */
-                       unsigned int empty_children; /* KEYLENGTH bits needed */
+                       t_key empty_children; /* KEYLENGTH bits needed */
+                       t_key full_children;  /* KEYLENGTH bits needed */
                        struct tnode __rcu *child[0];
                };
                /* This list pointer if valid if bits == 0 (LEAF) */
@@ -302,6 +303,16 @@ static struct tnode *tnode_alloc(size_t size)
                return vzalloc(size);
 }
 
+static inline void empty_child_inc(struct tnode *n)
+{
+       ++n->empty_children ? : ++n->full_children;
+}
+
+static inline void empty_child_dec(struct tnode *n)
+{
+       n->empty_children-- ? : n->full_children--;
+}
+
 static struct tnode *leaf_new(t_key key)
 {
        struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
@@ -335,7 +346,7 @@ static struct leaf_info *leaf_info_new(int plen)
 
 static struct tnode *tnode_new(t_key key, int pos, int bits)
 {
-       size_t sz = offsetof(struct tnode, child[1 << bits]);
+       size_t sz = offsetof(struct tnode, child[1ul << bits]);
        struct tnode *tn = tnode_alloc(sz);
        unsigned int shift = pos + bits;
 
@@ -348,8 +359,10 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
                tn->pos = pos;
                tn->bits = bits;
                tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
-               tn->full_children = 0;
-               tn->empty_children = 1<<bits;
+               if (bits == KEYLENGTH)
+                       tn->full_children = 1;
+               else
+                       tn->empty_children = 1ul << bits;
        }
 
        pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
@@ -375,11 +388,11 @@ static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
 
        BUG_ON(i >= tnode_child_length(tn));
 
-       /* update emptyChildren */
+       /* update emptyChildren, overflow into fullChildren */
        if (n == NULL && chi != NULL)
-               tn->empty_children++;
-       else if (n != NULL && chi == NULL)
-               tn->empty_children--;
+               empty_child_inc(tn);
+       if (n != NULL && chi == NULL)
+               empty_child_dec(tn);
 
        /* update fullChildren */
        wasfull = tnode_full(tn, chi);
@@ -396,8 +409,30 @@ static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
        rcu_assign_pointer(tn->child[i], n);
 }
 
-static void put_child_root(struct tnode *tp, struct trie *t,
-                          t_key key, struct tnode *n)
+static void update_children(struct tnode *tn)
+{
+       unsigned long i;
+
+       /* update all of the child parent pointers */
+       for (i = tnode_child_length(tn); i;) {
+               struct tnode *inode = tnode_get_child(tn, --i);
+
+               if (!inode)
+                       continue;
+
+               /* Either update the children of a tnode that
+                * already belongs to us or update the child
+                * to point to ourselves.
+                */
+               if (node_parent(inode) == tn)
+                       update_children(inode);
+               else
+                       node_set_parent(inode, tn);
+       }
+}
+
+static inline void put_child_root(struct tnode *tp, struct trie *t,
+                                 t_key key, struct tnode *n)
 {
        if (tp)
                put_child(tp, get_index(key, tp), n);
@@ -434,10 +469,35 @@ static void tnode_free(struct tnode *tn)
        }
 }
 
+static void replace(struct trie *t, struct tnode *oldtnode, struct tnode *tn)
+{
+       struct tnode *tp = node_parent(oldtnode);
+       unsigned long i;
+
+       /* setup the parent pointer out of and back into this node */
+       NODE_INIT_PARENT(tn, tp);
+       put_child_root(tp, t, tn->key, tn);
+
+       /* update all of the child parent pointers */
+       update_children(tn);
+
+       /* all pointers should be clean so we are done */
+       tnode_free(oldtnode);
+
+       /* resize children now that oldtnode is freed */
+       for (i = tnode_child_length(tn); i;) {
+               struct tnode *inode = tnode_get_child(tn, --i);
+
+               /* resize child node */
+               if (tnode_full(tn, inode))
+                       resize(t, inode);
+       }
+}
+
 static int inflate(struct trie *t, struct tnode *oldtnode)
 {
-       struct tnode *inode, *node0, *node1, *tn, *tp;
-       unsigned long i, j, k;
+       struct tnode *tn;
+       unsigned long i;
        t_key m;
 
        pr_debug("In inflate\n");
@@ -446,13 +506,18 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
        if (!tn)
                return -ENOMEM;
 
+       /* prepare oldtnode to be freed */
+       tnode_free_init(oldtnode);
+
        /* Assemble all of the pointers in our cluster, in this case that
         * represents all of the pointers out of our allocated nodes that
         * point to existing tnodes and the links between our allocated
         * nodes.
         */
        for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) {
-               inode = tnode_get_child(oldtnode, --i);
+               struct tnode *inode = tnode_get_child(oldtnode, --i);
+               struct tnode *node0, *node1;
+               unsigned long j, k;
 
                /* An empty child */
                if (inode == NULL)
@@ -464,6 +529,9 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
                        continue;
                }
 
+               /* drop the node in the old tnode free list */
+               tnode_free_append(oldtnode, inode);
+
                /* An internal node with two children */
                if (inode->bits == 1) {
                        put_child(tn, 2 * i + 1, tnode_get_child(inode, 1));
@@ -488,9 +556,9 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
                node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
                if (!node1)
                        goto nomem;
-               tnode_free_append(tn, node1);
+               node0 = tnode_new(inode->key, inode->pos, inode->bits - 1);
 
-               node0 = tnode_new(inode->key & ~m, inode->pos, inode->bits - 1);
+               tnode_free_append(tn, node1);
                if (!node0)
                        goto nomem;
                tnode_free_append(tn, node0);
@@ -512,53 +580,9 @@ static int inflate(struct trie *t, struct tnode *oldtnode)
                put_child(tn, 2 * i, node0);
        }
 
-       /* setup the parent pointer into and out of this node */
-       tp = node_parent(oldtnode);
-       NODE_INIT_PARENT(tn, tp);
-       put_child_root(tp, t, tn->key, tn);
-
-       /* prepare oldtnode to be freed */
-       tnode_free_init(oldtnode);
-
-       /* update all child nodes parent pointers to route to us */
-       for (i = tnode_child_length(oldtnode); i;) {
-               inode = tnode_get_child(oldtnode, --i);
-
-               /* A leaf or an internal node with skipped bits */
-               if (!tnode_full(oldtnode, inode)) {
-                       node_set_parent(inode, tn);
-                       continue;
-               }
-
-               /* drop the node in the old tnode free list */
-               tnode_free_append(oldtnode, inode);
-
-               /* fetch new nodes */
-               node1 = tnode_get_child(tn, 2 * i + 1);
-               node0 = tnode_get_child(tn, 2 * i);
-
-               /* bits == 1 then node0 and node1 represent inode's children */
-               if (inode->bits == 1) {
-                       node_set_parent(node1, tn);
-                       node_set_parent(node0, tn);
-                       continue;
-               }
-
-               /* update parent pointers in child node's children */
-               for (k = tnode_child_length(inode), j = k / 2; j;) {
-                       node_set_parent(tnode_get_child(inode, --k), node1);
-                       node_set_parent(tnode_get_child(inode, --j), node0);
-                       node_set_parent(tnode_get_child(inode, --k), node1);
-                       node_set_parent(tnode_get_child(inode, --j), node0);
-               }
+       /* setup the parent pointers into and out of this node */
+       replace(t, oldtnode, tn);
 
-               /* resize child nodes */
-               resize(t, node1);
-               resize(t, node0);
-       }
-
-       /* we completed without error, prepare to free old node */
-       tnode_free(oldtnode);
        return 0;
 nomem:
        /* all pointers should be clean so we are done */
@@ -568,7 +592,7 @@ nomem:
 
 static int halve(struct trie *t, struct tnode *oldtnode)
 {
-       struct tnode *tn, *tp, *inode, *node0, *node1;
+       struct tnode *tn;
        unsigned long i;
 
        pr_debug("In halve\n");
@@ -577,14 +601,18 @@ static int halve(struct trie *t, struct tnode *oldtnode)
        if (!tn)
                return -ENOMEM;
 
+       /* prepare oldtnode to be freed */
+       tnode_free_init(oldtnode);
+
        /* Assemble all of the pointers in our cluster, in this case that
         * represents all of the pointers out of our allocated nodes that
         * point to existing tnodes and the links between our allocated
         * nodes.
         */
        for (i = tnode_child_length(oldtnode); i;) {
-               node1 = tnode_get_child(oldtnode, --i);
-               node0 = tnode_get_child(oldtnode, --i);
+               struct tnode *node1 = tnode_get_child(oldtnode, --i);
+               struct tnode *node0 = tnode_get_child(oldtnode, --i);
+               struct tnode *inode;
 
                /* At least one of the children is empty */
                if (!node1 || !node0) {
@@ -609,36 +637,28 @@ static int halve(struct trie *t, struct tnode *oldtnode)
                put_child(tn, i / 2, inode);
        }
 
-       /* setup the parent pointer out of and back into this node */
-       tp = node_parent(oldtnode);
-       NODE_INIT_PARENT(tn, tp);
-       put_child_root(tp, t, tn->key, tn);
-
-       /* prepare oldtnode to be freed */
-       tnode_free_init(oldtnode);
+       /* setup the parent pointers into and out of this node */
+       replace(t, oldtnode, tn);
 
-       /* update all of the child parent pointers */
-       for (i = tnode_child_length(tn); i;) {
-               inode = tnode_get_child(tn, --i);
-
-               /* only new tnodes will be considered "full" nodes */
-               if (!tnode_full(tn, inode)) {
-                       node_set_parent(inode, tn);
-                       continue;
-               }
+       return 0;
+}
 
-               /* Two nonempty children */
-               node_set_parent(tnode_get_child(inode, 1), inode);
-               node_set_parent(tnode_get_child(inode, 0), inode);
+static void collapse(struct trie *t, struct tnode *oldtnode)
+{
+       struct tnode *n, *tp;
+       unsigned long i;
 
-               /* resize child node */
-               resize(t, inode);
-       }
+       /* scan the tnode looking for that one child that might still exist */
+       for (n = NULL, i = tnode_child_length(oldtnode); !n && i;)
+               n = tnode_get_child(oldtnode, --i);
 
-       /* all pointers should be clean so we are done */
-       tnode_free(oldtnode);
+       /* compress one level */
+       tp = node_parent(oldtnode);
+       put_child_root(tp, t, oldtnode->key, n);
+       node_set_parent(n, tp);
 
-       return 0;
+       /* drop dead node */
+       node_free(oldtnode);
 }
 
 static unsigned char update_suffix(struct tnode *tn)
@@ -740,10 +760,12 @@ static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
 
        /* Keep root node larger */
        threshold *= tp ? inflate_threshold : inflate_threshold_root;
-       used += tn->full_children;
        used -= tn->empty_children;
+       used += tn->full_children;
+
+       /* if bits == KEYLENGTH then pos = 0, and will fail below */
 
-       return tn->pos && ((50 * used) >= threshold);
+       return (used > 1) && tn->pos && ((50 * used) >= threshold);
 }
 
 static bool should_halve(const struct tnode *tp, const struct tnode *tn)
@@ -755,15 +777,31 @@ static bool should_halve(const struct tnode *tp, const struct tnode *tn)
        threshold *= tp ? halve_threshold : halve_threshold_root;
        used -= tn->empty_children;
 
-       return (tn->bits > 1) && ((100 * used) < threshold);
+       /* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */
+
+       return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
+}
+
+static bool should_collapse(const struct tnode *tn)
+{
+       unsigned long used = tnode_child_length(tn);
+
+       used -= tn->empty_children;
+
+       /* account for bits == KEYLENGTH case */
+       if ((tn->bits == KEYLENGTH) && tn->full_children)
+               used -= KEY_MAX;
+
+       /* One child or none, time to drop us from the trie */
+       return used < 2;
 }
 
 #define MAX_WORK 10
 static void resize(struct trie *t, struct tnode *tn)
 {
-       struct tnode *tp = node_parent(tn), *n = NULL;
+       struct tnode *tp = node_parent(tn);
        struct tnode __rcu **cptr;
-       int max_work;
+       int max_work = MAX_WORK;
 
        pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
                 tn, inflate_threshold, halve_threshold);
@@ -775,19 +813,10 @@ static void resize(struct trie *t, struct tnode *tn)
        cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie;
        BUG_ON(tn != rtnl_dereference(*cptr));
 
-       /* No children */
-       if (tn->empty_children > (tnode_child_length(tn) - 1))
-               goto no_children;
-
-       /* One child */
-       if (tn->empty_children == (tnode_child_length(tn) - 1))
-               goto one_child;
-
        /* Double as long as the resulting node has a number of
         * nonempty nodes that are above the threshold.
         */
-       max_work = MAX_WORK;
-       while (should_inflate(tp, tn) && max_work--) {
+       while (should_inflate(tp, tn) && max_work) {
                if (inflate(t, tn)) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
                        this_cpu_inc(t->stats->resize_node_skipped);
@@ -795,6 +824,7 @@ static void resize(struct trie *t, struct tnode *tn)
                        break;
                }
 
+               max_work--;
                tn = rtnl_dereference(*cptr);
        }
 
@@ -805,8 +835,7 @@ static void resize(struct trie *t, struct tnode *tn)
        /* Halve as long as the number of empty children in this
         * node is above threshold.
         */
-       max_work = MAX_WORK;
-       while (should_halve(tp, tn) && max_work--) {
+       while (should_halve(tp, tn) && max_work) {
                if (halve(t, tn)) {
 #ifdef CONFIG_IP_FIB_TRIE_STATS
                        this_cpu_inc(t->stats->resize_node_skipped);
@@ -814,23 +843,13 @@ static void resize(struct trie *t, struct tnode *tn)
                        break;
                }
 
+               max_work--;
                tn = rtnl_dereference(*cptr);
        }
 
        /* Only one child remains */
-       if (tn->empty_children == (tnode_child_length(tn) - 1)) {
-               unsigned long i;
-one_child:
-               for (i = tnode_child_length(tn); !n && i;)
-                       n = tnode_get_child(tn, --i);
-no_children:
-               /* compress one level */
-               put_child_root(tp, t, tn->key, n);
-               node_set_parent(n, tp);
-
-               /* drop dead node */
-               tnode_free_init(tn);
-               tnode_free(tn);
+       if (should_collapse(tn)) {
+               collapse(t, tn);
                return;
        }
 
@@ -898,27 +917,20 @@ static void leaf_push_suffix(struct tnode *l)
 
 static void remove_leaf_info(struct tnode *l, struct leaf_info *old)
 {
-       struct hlist_node *prev;
-
-       /* record the location of the pointer to this object */
-       prev = rtnl_dereference(hlist_pprev_rcu(&old->hlist));
+       /* record the location of the previous list_info entry */
+       struct hlist_node **pprev = old->hlist.pprev;
+       struct leaf_info *li = hlist_entry(pprev, typeof(*li), hlist.next);
 
        /* remove the leaf info from the list */
        hlist_del_rcu(&old->hlist);
 
-       /* if we emptied the list this leaf will be freed and we can sort
-        * out parent suffix lengths as a part of trie_rebalance
-        */
-       if (hlist_empty(&l->list))
+       /* only access li if it is pointing at the last valid hlist_node */
+       if (hlist_empty(&l->list) || (*pprev))
                return;
 
-       /* if we removed the tail then we need to update slen */
-       if (!rcu_access_pointer(hlist_next_rcu(prev))) {
-               struct leaf_info *li = hlist_entry(prev, typeof(*li), hlist);
-
-               l->slen = KEYLENGTH - li->plen;
-               leaf_pull_suffix(l);
-       }
+       /* update the trie with the latest suffix length */
+       l->slen = KEYLENGTH - li->plen;
+       leaf_pull_suffix(l);
 }
 
 static void insert_leaf_info(struct tnode *l, struct leaf_info *new)
@@ -942,7 +954,7 @@ static void insert_leaf_info(struct tnode *l, struct leaf_info *new)
        }
 
        /* if we added to the tail node then we need to update slen */
-       if (!rcu_access_pointer(hlist_next_rcu(&new->hlist))) {
+       if (l->slen < (KEYLENGTH - new->plen)) {
                l->slen = KEYLENGTH - new->plen;
                leaf_push_suffix(l);
        }
@@ -961,12 +973,12 @@ static struct tnode *fib_find_node(struct trie *t, u32 key)
                 * prefix plus zeros for the bits in the cindex. The index
                 * is the difference between the key and this value.  From
                 * this we can actually derive several pieces of data.
-                *   if !(index >> bits)
-                *     we know the value is cindex
-                *   else
+                *   if (index & (~0ul << bits))
                 *     we have a mismatch in skip bits and failed
+                *   else
+                *     we know the value is cindex
                 */
-               if (index >> n->bits)
+               if (index & (~0ul << n->bits))
                        return NULL;
 
                /* we have found a leaf. Prefixes have already been compared */
@@ -979,6 +991,26 @@ static struct tnode *fib_find_node(struct trie *t, u32 key)
        return n;
 }
 
+/* Return the first fib alias matching TOS with
+ * priority less than or equal to PRIO.
+ */
+static struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
+{
+       struct fib_alias *fa;
+
+       if (!fah)
+               return NULL;
+
+       list_for_each_entry(fa, fah, fa_list) {
+               if (fa->fa_tos > tos)
+                       continue;
+               if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos)
+                       return fa;
+       }
+
+       return NULL;
+}
+
 static void trie_rebalance(struct trie *t, struct tnode *tn)
 {
        struct tnode *tp;
@@ -1301,12 +1333,12 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                 * prefix plus zeros for the "bits" in the prefix. The index
                 * is the difference between the key and this value.  From
                 * this we can actually derive several pieces of data.
-                *   if !(index >> bits)
-                *     we know the value is child index
-                *   else
+                *   if (index & (~0ul << bits))
                 *     we have a mismatch in skip bits and failed
+                *   else
+                *     we know the value is cindex
                 */
-               if (index >> n->bits)
+               if (index & (~0ul << n->bits))
                        break;
 
                /* we have found a leaf. Prefixes have already been compared */
@@ -1574,6 +1606,7 @@ static int trie_flush_leaf(struct tnode *l)
        struct hlist_head *lih = &l->list;
        struct hlist_node *tmp;
        struct leaf_info *li = NULL;
+       unsigned char plen = KEYLENGTH;
 
        hlist_for_each_entry_safe(li, tmp, lih, hlist) {
                found += trie_flush_list(&li->falh);
@@ -1581,8 +1614,14 @@ static int trie_flush_leaf(struct tnode *l)
                if (list_empty(&li->falh)) {
                        hlist_del_rcu(&li->hlist);
                        free_leaf_info(li);
+                       continue;
                }
+
+               plen = li->plen;
        }
+
+       l->slen = KEYLENGTH - plen;
+
        return found;
 }
 
@@ -1661,13 +1700,22 @@ int fib_table_flush(struct fib_table *tb)
        for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
                found += trie_flush_leaf(l);
 
-               if (ll && hlist_empty(&ll->list))
-                       trie_leaf_remove(t, ll);
+               if (ll) {
+                       if (hlist_empty(&ll->list))
+                               trie_leaf_remove(t, ll);
+                       else
+                               leaf_pull_suffix(ll);
+               }
+
                ll = l;
        }
 
-       if (ll && hlist_empty(&ll->list))
-               trie_leaf_remove(t, ll);
+       if (ll) {
+               if (hlist_empty(&ll->list))
+                       trie_leaf_remove(t, ll);
+               else
+                       leaf_pull_suffix(ll);
+       }
 
        pr_debug("trie_flush found=%d\n", found);
        return found;
@@ -1935,16 +1983,10 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
                        hlist_for_each_entry_rcu(li, &n->list, hlist)
                                ++s->prefixes;
                } else {
-                       unsigned long i;
-
                        s->tnodes++;
                        if (n->bits < MAX_STAT_DEPTH)
                                s->nodesizes[n->bits]++;
-
-                       for (i = tnode_child_length(n); i--;) {
-                               if (!rcu_access_pointer(n->child[i]))
-                                       s->nullpointers++;
-                       }
+                       s->nullpointers += n->empty_children;
                }
        }
        rcu_read_unlock();
index b986298a7ba39908290ccd808a24947d776b91b4..92ddea1e645732118d982685c4bab5b2d6c03641 100644 (file)
@@ -70,7 +70,6 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
-       __wsum delta;
 
        if (skb->remcsum_offload) {
                /* Already processed in GRO path */
@@ -82,14 +81,7 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
                return NULL;
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];
 
-       if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
-               __skb_checksum_complete(skb);
-
-       delta = remcsum_adjust((void *)guehdr + hdrlen,
-                              skb->csum, start, offset);
-
-       /* Adjust skb->csum since we changed the packet */
-       skb->csum = csum_add(skb->csum, delta);
+       skb_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
 
        return guehdr;
 }
@@ -174,7 +166,8 @@ drop:
 }
 
 static struct sk_buff **fou_gro_receive(struct sk_buff **head,
-                                       struct sk_buff *skb)
+                                       struct sk_buff *skb,
+                                       struct udp_offload *uoff)
 {
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
@@ -195,7 +188,8 @@ out_unlock:
        return pp;
 }
 
-static int fou_gro_complete(struct sk_buff *skb, int nhoff)
+static int fou_gro_complete(struct sk_buff *skb, int nhoff,
+                           struct udp_offload *uoff)
 {
        const struct net_offload *ops;
        u8 proto = NAPI_GRO_CB(skb)->proto;
@@ -226,7 +220,6 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
-       __wsum delta;
 
        if (skb->remcsum_offload)
                return guehdr;
@@ -241,12 +234,7 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                        return NULL;
        }
 
-       delta = remcsum_adjust((void *)guehdr + hdrlen,
-                              NAPI_GRO_CB(skb)->csum, start, offset);
-
-       /* Adjust skb->csum since we changed the packet */
-       skb->csum = csum_add(skb->csum, delta);
-       NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+       skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
 
        skb->remcsum_offload = 1;
 
@@ -254,7 +242,8 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
 }
 
 static struct sk_buff **gue_gro_receive(struct sk_buff **head,
-                                       struct sk_buff *skb)
+                                       struct sk_buff *skb,
+                                       struct udp_offload *uoff)
 {
        const struct net_offload **offloads;
        const struct net_offload *ops;
@@ -360,7 +349,8 @@ out:
        return pp;
 }
 
-static int gue_gro_complete(struct sk_buff *skb, int nhoff)
+static int gue_gro_complete(struct sk_buff *skb, int nhoff,
+                           struct udp_offload *uoff)
 {
        const struct net_offload **offloads;
        struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
@@ -490,7 +480,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
        sk->sk_user_data = fou;
        fou->sock = sock;
 
-       udp_set_convert_csum(sk, true);
+       inet_inc_convert_csum(sk);
 
        sk->sk_allocation = GFP_ATOMIC;
 
index 19e256e1dd92f374266d52612d483667de684cf7..5a4828ba05ad7997998e400d10bc8b1dfbe99db0 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/skbuff.h>
-#include <linux/rculist.h>
+#include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/in.h>
 #include <linux/ip.h>
@@ -26,8 +26,8 @@
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
-#include <linux/hash.h>
 #include <linux/ethtool.h>
+#include <linux/mutex.h>
 #include <net/arp.h>
 #include <net/ndisc.h>
 #include <net/ip.h>
 #include <net/ip6_checksum.h>
 #endif
 
-#define PORT_HASH_BITS 8
-#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
+/* Protects sock_list and refcounts. */
+static DEFINE_MUTEX(geneve_mutex);
 
 /* per-network namespace private data for this module */
 struct geneve_net {
-       struct hlist_head       sock_list[PORT_HASH_SIZE];
-       spinlock_t              sock_lock;   /* Protects sock_list */
+       struct list_head        sock_list;
 };
 
 static int geneve_net_id;
 
-static struct workqueue_struct *geneve_wq;
-
 static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
 {
        return (struct genevehdr *)(udp_hdr(skb) + 1);
 }
 
-static struct hlist_head *gs_head(struct net *net, __be16 port)
+static struct geneve_sock *geneve_find_sock(struct net *net,
+                                           sa_family_t family, __be16 port)
 {
        struct geneve_net *gn = net_generic(net, geneve_net_id);
-
-       return &gn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
-}
-
-/* Find geneve socket based on network namespace and UDP port */
-static struct geneve_sock *geneve_find_sock(struct net *net, __be16 port)
-{
        struct geneve_sock *gs;
 
-       hlist_for_each_entry_rcu(gs, gs_head(net, port), hlist) {
-               if (inet_sk(gs->sock->sk)->inet_sport == port)
+       list_for_each_entry(gs, &gn->sock_list, list) {
+               if (inet_sk(gs->sock->sk)->inet_sport == port &&
+                   inet_sk(gs->sock->sk)->sk.sk_family == family)
                        return gs;
        }
 
@@ -115,19 +107,19 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
                    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
                    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
                    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-                   bool xnet)
+                   bool csum, bool xnet)
 {
        struct genevehdr *gnvh;
        int min_headroom;
        int err;
 
-       skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+       skb = udp_tunnel_handle_offloads(skb, csum);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
-                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        err = skb_cow_head(skb, min_headroom);
        if (unlikely(err)) {
@@ -144,8 +136,9 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 
-       return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
-                                  tos, ttl, df, src_port, dst_port, xnet);
+       return udp_tunnel_xmit_skb(rt, skb, src, dst,
+                                  tos, ttl, df, src_port, dst_port, xnet,
+                                  !csum);
 }
 EXPORT_SYMBOL_GPL(geneve_xmit_skb);
 
@@ -155,7 +148,8 @@ static int geneve_hlen(struct genevehdr *gh)
 }
 
 static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
-                                          struct sk_buff *skb)
+                                          struct sk_buff *skb,
+                                          struct udp_offload *uoff)
 {
        struct sk_buff *p, **pp = NULL;
        struct genevehdr *gh, *gh2;
@@ -219,7 +213,8 @@ out:
        return pp;
 }
 
-static int geneve_gro_complete(struct sk_buff *skb, int nhoff)
+static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
+                              struct udp_offload *uoff)
 {
        struct genevehdr *gh;
        struct packet_offload *ptype;
@@ -307,15 +302,6 @@ error:
        return 1;
 }
 
-static void geneve_del_work(struct work_struct *work)
-{
-       struct geneve_sock *gs = container_of(work, struct geneve_sock,
-                                             del_work);
-
-       udp_tunnel_sock_release(gs->sock);
-       kfree_rcu(gs, rcu);
-}
-
 static struct socket *geneve_create_sock(struct net *net, bool ipv6,
                                         __be16 port)
 {
@@ -356,8 +342,6 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
        if (!gs)
                return ERR_PTR(-ENOMEM);
 
-       INIT_WORK(&gs->del_work, geneve_del_work);
-
        sock = geneve_create_sock(net, ipv6, port);
        if (IS_ERR(sock)) {
                kfree(gs);
@@ -365,7 +349,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
        }
 
        gs->sock = sock;
-       atomic_set(&gs->refcnt, 1);
+       gs->refcnt = 1;
        gs->rcv = rcv;
        gs->rcv_data = data;
 
@@ -373,11 +357,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
        gs->udp_offloads.port = port;
        gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
        gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
-
-       spin_lock(&gn->sock_lock);
-       hlist_add_head_rcu(&gs->hlist, gs_head(net, port));
        geneve_notify_add_rx_port(gs);
-       spin_unlock(&gn->sock_lock);
 
        /* Mark socket as an encapsulation socket */
        tunnel_cfg.sk_user_data = gs;
@@ -386,6 +366,8 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
        tunnel_cfg.encap_destroy = NULL;
        setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 
+       list_add(&gs->list, &gn->sock_list);
+
        return gs;
 }
 
@@ -393,25 +375,21 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
                                    geneve_rcv_t *rcv, void *data,
                                    bool no_share, bool ipv6)
 {
-       struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_sock *gs;
 
-       gs = geneve_socket_create(net, port, rcv, data, ipv6);
-       if (!IS_ERR(gs))
-               return gs;
-
-       if (no_share)   /* Return error if sharing is not allowed. */
-               return ERR_PTR(-EINVAL);
+       mutex_lock(&geneve_mutex);
 
-       spin_lock(&gn->sock_lock);
-       gs = geneve_find_sock(net, port);
-       if (gs && ((gs->rcv != rcv) ||
-                  !atomic_add_unless(&gs->refcnt, 1, 0)))
+       gs = geneve_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
+       if (gs) {
+               if (!no_share && gs->rcv == rcv)
+                       gs->refcnt++;
+               else
                        gs = ERR_PTR(-EBUSY);
-       spin_unlock(&gn->sock_lock);
+       } else {
+               gs = geneve_socket_create(net, port, rcv, data, ipv6);
+       }
 
-       if (!gs)
-               gs = ERR_PTR(-EINVAL);
+       mutex_unlock(&geneve_mutex);
 
        return gs;
 }
@@ -419,37 +397,32 @@ EXPORT_SYMBOL_GPL(geneve_sock_add);
 
 void geneve_sock_release(struct geneve_sock *gs)
 {
-       struct net *net = sock_net(gs->sock->sk);
-       struct geneve_net *gn = net_generic(net, geneve_net_id);
+       mutex_lock(&geneve_mutex);
 
-       if (!atomic_dec_and_test(&gs->refcnt))
-               return;
+       if (--gs->refcnt)
+               goto unlock;
 
-       spin_lock(&gn->sock_lock);
-       hlist_del_rcu(&gs->hlist);
+       list_del(&gs->list);
        geneve_notify_del_rx_port(gs);
-       spin_unlock(&gn->sock_lock);
+       udp_tunnel_sock_release(gs->sock);
+       kfree_rcu(gs, rcu);
 
-       queue_work(geneve_wq, &gs->del_work);
+unlock:
+       mutex_unlock(&geneve_mutex);
 }
 EXPORT_SYMBOL_GPL(geneve_sock_release);
 
 static __net_init int geneve_init_net(struct net *net)
 {
        struct geneve_net *gn = net_generic(net, geneve_net_id);
-       unsigned int h;
 
-       spin_lock_init(&gn->sock_lock);
-
-       for (h = 0; h < PORT_HASH_SIZE; ++h)
-               INIT_HLIST_HEAD(&gn->sock_list[h]);
+       INIT_LIST_HEAD(&gn->sock_list);
 
        return 0;
 }
 
 static struct pernet_operations geneve_net_ops = {
        .init = geneve_init_net,
-       .exit = NULL,
        .id   = &geneve_net_id,
        .size = sizeof(struct geneve_net),
 };
@@ -458,10 +431,6 @@ static int __init geneve_init_module(void)
 {
        int rc;
 
-       geneve_wq = alloc_workqueue("geneve", 0, 0);
-       if (!geneve_wq)
-               return -ENOMEM;
-
        rc = register_pernet_subsys(&geneve_net_ops);
        if (rc)
                return rc;
@@ -470,11 +439,10 @@ static int __init geneve_init_module(void)
 
        return 0;
 }
-late_initcall(geneve_init_module);
+module_init(geneve_init_module);
 
 static void __exit geneve_cleanup_module(void)
 {
-       destroy_workqueue(geneve_wq);
        unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
index 36f5584d93c5da194caad055505b2ca97807c988..5e564014a0b75d04a8f64d48c6d3a14fe6df18a1 100644 (file)
@@ -205,7 +205,7 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
  */
 static struct sock *icmp_sk(struct net *net)
 {
-       return net->ipv4.icmp_sk[smp_processor_id()];
+       return *this_cpu_ptr(net->ipv4.icmp_sk);
 }
 
 static inline struct sock *icmp_xmit_lock(struct net *net)
@@ -1140,8 +1140,8 @@ static void __net_exit icmp_sk_exit(struct net *net)
        int i;
 
        for_each_possible_cpu(i)
-               inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
-       kfree(net->ipv4.icmp_sk);
+               inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
+       free_percpu(net->ipv4.icmp_sk);
        net->ipv4.icmp_sk = NULL;
 }
 
@@ -1149,9 +1149,8 @@ static int __net_init icmp_sk_init(struct net *net)
 {
        int i, err;
 
-       net->ipv4.icmp_sk =
-               kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
-       if (net->ipv4.icmp_sk == NULL)
+       net->ipv4.icmp_sk = alloc_percpu(struct sock *);
+       if (!net->ipv4.icmp_sk)
                return -ENOMEM;
 
        for_each_possible_cpu(i) {
@@ -1162,7 +1161,7 @@ static int __net_init icmp_sk_init(struct net *net)
                if (err < 0)
                        goto fail;
 
-               net->ipv4.icmp_sk[i] = sk;
+               *per_cpu_ptr(net->ipv4.icmp_sk, i) = sk;
 
                /* Enough space for 2 64K ICMP packets, including
                 * sk_buff/skb_shared_info struct overhead.
@@ -1203,8 +1202,8 @@ static int __net_init icmp_sk_init(struct net *net)
 
 fail:
        for_each_possible_cpu(i)
-               inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
-       kfree(net->ipv4.icmp_sk);
+               inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
+       free_percpu(net->ipv4.icmp_sk);
        return err;
 }
 
index e34dccbc4d70bd26f7cec88c031dcea31124314a..81751f12645f6224a7f076dea1da2fc0d638819a 100644 (file)
@@ -203,7 +203,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                icsk->icsk_ca_ops->get_info(sk, ext, skb);
 
 out:
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 errout:
        nlmsg_cancel(skb, nlh);
@@ -271,7 +272,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        }
 #endif
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
@@ -758,7 +760,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        }
 #endif
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
index 3a83ce5efa80e3fc2c062ec08465840018159b14..787b3c294ce672244ce08c5426c03bbd1f71c0f3 100644 (file)
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
         *      We now generate an ICMP HOST REDIRECT giving the route
         *      we calculated.
         */
-       if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
+       if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
+           !skb_sec_path(skb))
                ip_rt_send_redirect(skb);
 
        skb->priority = rt_tos2priority(iph->tos);
index 942576e27df163fe4c851bfb06346e5f7cab84ee..6207275fc749fc52f00e63d6cae212148a791f1b 100644 (file)
@@ -659,12 +659,12 @@ static bool ipgre_netlink_encap_parms(struct nlattr *data[],
 
        if (data[IFLA_GRE_ENCAP_SPORT]) {
                ret = true;
-               ipencap->sport = nla_get_u16(data[IFLA_GRE_ENCAP_SPORT]);
+               ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
        }
 
        if (data[IFLA_GRE_ENCAP_DPORT]) {
                ret = true;
-               ipencap->dport = nla_get_u16(data[IFLA_GRE_ENCAP_DPORT]);
+               ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
        }
 
        return ret;
@@ -786,10 +786,10 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
        if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
                        t->encap.type) ||
-           nla_put_u16(skb, IFLA_GRE_ENCAP_SPORT,
-                       t->encap.sport) ||
-           nla_put_u16(skb, IFLA_GRE_ENCAP_DPORT,
-                       t->encap.dport) ||
+           nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
+                        t->encap.sport) ||
+           nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
+                        t->encap.dport) ||
            nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
                        t->encap.flags))
                goto nla_put_failure;
@@ -829,6 +829,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
        .fill_info      = ipgre_fill_info,
+       .get_link_net   = ip_tunnel_get_link_net,
 };
 
 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
@@ -843,6 +844,7 @@ static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
        .fill_info      = ipgre_fill_info,
+       .get_link_net   = ip_tunnel_get_link_net,
 };
 
 static int __net_init ipgre_tap_init_net(struct net *net)
index b50861b22b6bea036b1a99ddf141d7ed2d6cf6cd..d68199d9b2b01faf7a5272862b7cd8658f08c8ae 100644 (file)
@@ -755,13 +755,11 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk
        struct msghdr *msg = from;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               /* XXX: stripping const */
-               if (memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len) < 0)
+               if (copy_from_iter(to, len, &msg->msg_iter) != len)
                        return -EFAULT;
        } else {
                __wsum csum = 0;
-               /* XXX: stripping const */
-               if (csum_partial_copy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len, &csum) < 0)
+               if (csum_and_copy_from_iter(to, len, &csum, &msg->msg_iter) != len)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
@@ -1506,23 +1504,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
 /*
  *     Generic function to send a packet as reply to another packet.
  *     Used to send some TCP resets/acks so far.
- *
- *     Use a fake percpu inet socket to avoid false sharing and contention.
  */
-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
-       .sk = {
-               .__sk_common = {
-                       .skc_refcnt = ATOMIC_INIT(1),
-               },
-               .sk_wmem_alloc  = ATOMIC_INIT(1),
-               .sk_allocation  = GFP_ATOMIC,
-               .sk_flags       = (1UL << SOCK_USE_WRITE_QUEUE),
-       },
-       .pmtudisc       = IP_PMTUDISC_WANT,
-       .uc_ttl         = -1,
-};
-
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           const struct ip_options *sopt,
                           __be32 daddr, __be32 saddr,
                           const struct ip_reply_arg *arg,
@@ -1532,9 +1515,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
        struct ipcm_cookie ipc;
        struct flowi4 fl4;
        struct rtable *rt = skb_rtable(skb);
+       struct net *net = sock_net(sk);
        struct sk_buff *nskb;
-       struct sock *sk;
-       struct inet_sock *inet;
        int err;
 
        if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
@@ -1565,15 +1547,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
        if (IS_ERR(rt))
                return;
 
-       inet = &get_cpu_var(unicast_sock);
+       inet_sk(sk)->tos = arg->tos;
 
-       inet->tos = arg->tos;
-       sk = &inet->sk;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
-       sock_net_set(sk, net);
-       __skb_queue_head_init(&sk->sk_write_queue);
        sk->sk_sndbuf = sysctl_wmem_default;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1589,13 +1567,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
                          arg->csumoffset) = csum_fold(csum_add(nskb->csum,
                                                                arg->csum));
                nskb->ip_summed = CHECKSUM_NONE;
-               skb_orphan(nskb);
                skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
                ip_push_pending_frames(sk, &fl4);
        }
 out:
-       put_cpu_var(unicast_sock);
-
        ip_rt_put(rt);
 }
 
index 8a89c738b7a3b43407293f521bd6d7e009ee7c80..31d8c71986b40e28e5c84f5e62d474a639d3bf91 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 #include <net/compat.h>
+#include <net/checksum.h>
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/transp_v6.h>
 #endif
 #include <linux/errqueue.h>
 #include <asm/uaccess.h>
 
-#define IP_CMSG_PKTINFO                1
-#define IP_CMSG_TTL            2
-#define IP_CMSG_TOS            4
-#define IP_CMSG_RECVOPTS       8
-#define IP_CMSG_RETOPTS                16
-#define IP_CMSG_PASSSEC                32
-#define IP_CMSG_ORIGDSTADDR     64
-
 /*
  *     SOL_IP control messages.
  */
@@ -104,6 +97,20 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
        put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
 }
 
+static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
+                                 int offset)
+{
+       __wsum csum = skb->csum;
+
+       if (skb->ip_summed != CHECKSUM_COMPLETE)
+               return;
+
+       if (offset != 0)
+               csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
+
+       put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
+}
+
 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 {
        char *secdata;
@@ -144,47 +151,73 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
        put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
 }
 
-void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
+                        int offset)
 {
        struct inet_sock *inet = inet_sk(skb->sk);
        unsigned int flags = inet->cmsg_flags;
 
        /* Ordered by supposed usage frequency */
-       if (flags & 1)
+       if (flags & IP_CMSG_PKTINFO) {
                ip_cmsg_recv_pktinfo(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_PKTINFO;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_TTL) {
                ip_cmsg_recv_ttl(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_TTL;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_TOS) {
                ip_cmsg_recv_tos(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_TOS;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_RECVOPTS) {
                ip_cmsg_recv_opts(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_RECVOPTS;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_RETOPTS) {
                ip_cmsg_recv_retopts(msg, skb);
-       if ((flags >>= 1) == 0)
-               return;
 
-       if (flags & 1)
+               flags &= ~IP_CMSG_RETOPTS;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_PASSSEC) {
                ip_cmsg_recv_security(msg, skb);
 
-       if ((flags >>= 1) == 0)
-               return;
-       if (flags & 1)
+               flags &= ~IP_CMSG_PASSSEC;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_ORIGDSTADDR) {
                ip_cmsg_recv_dstaddr(msg, skb);
 
+               flags &= ~IP_CMSG_ORIGDSTADDR;
+               if (!flags)
+                       return;
+       }
+
+       if (flags & IP_CMSG_CHECKSUM)
+               ip_cmsg_recv_checksum(msg, skb, offset);
 }
-EXPORT_SYMBOL(ip_cmsg_recv);
+EXPORT_SYMBOL(ip_cmsg_recv_offset);
 
 int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
                 bool allow_ipv6)
@@ -450,7 +483,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
        serr = SKB_EXT_ERR(skb);
 
-       if (sin) {
+       if (sin && skb->len) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
                                                   serr->addr_offset);
@@ -461,17 +494,14 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
        memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
        sin = &errhdr.offender;
-       sin->sin_family = AF_UNSPEC;
-
-       if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
-           ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin)) {
-               struct inet_sock *inet = inet_sk(sk);
+       memset(sin, 0, sizeof(*sin));
 
+       if (skb->len &&
+           (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+            ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
-               sin->sin_port = 0;
-               memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
-               if (inet->cmsg_flags)
+               if (inet_sk(sk)->cmsg_flags)
                        ip_cmsg_recv(msg, skb);
        }
 
@@ -522,6 +552,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        case IP_MULTICAST_ALL:
        case IP_MULTICAST_LOOP:
        case IP_RECVORIGDSTADDR:
+       case IP_CHECKSUM:
                if (optlen >= sizeof(int)) {
                        if (get_user(val, (int __user *) optval))
                                return -EFAULT;
@@ -619,6 +650,19 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                else
                        inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
                break;
+       case IP_CHECKSUM:
+               if (val) {
+                       if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
+                               inet_inc_convert_csum(sk);
+                               inet->cmsg_flags |= IP_CMSG_CHECKSUM;
+                       }
+               } else {
+                       if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
+                               inet_dec_convert_csum(sk);
+                               inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
+                       }
+               }
+               break;
        case IP_TOS:    /* This sets both TOS and Precedence */
                if (sk->sk_type == SOCK_STREAM) {
                        val &= ~INET_ECN_MASK;
@@ -1222,6 +1266,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
        case IP_RECVORIGDSTADDR:
                val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
                break;
+       case IP_CHECKSUM:
+               val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
+               break;
        case IP_TOS:
                val = inet->tos;
                break;
index d3e4479367208cd16d4ea50b012e23bfafa76357..2cd08280c77bc33cac90c62e0f6f8f36343a768d 100644 (file)
@@ -972,6 +972,14 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
 
+struct net *ip_tunnel_get_link_net(const struct net_device *dev)
+{
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+
+       return tunnel->net;
+}
+EXPORT_SYMBOL(ip_tunnel_get_link_net);
+
 int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                                  struct rtnl_link_ops *ops, char *devname)
 {
index 1a7e979e80ba356f685ecfe020b98855f19db0a3..94efe148181cde3bef3b58ad8d6dd32deee8747f 100644 (file)
@@ -531,6 +531,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .dellink        = ip_tunnel_dellink,
        .get_size       = vti_get_size,
        .fill_info      = vti_fill_info,
+       .get_link_net   = ip_tunnel_get_link_net,
 };
 
 static int __init vti_init(void)
index 7fa18bc7e47fd3199c18247f19390d61f6e83013..b26376ef87f616d249dbfebbd6705eb818fefd05 100644 (file)
@@ -209,9 +209,9 @@ static int __init ic_open_devs(void)
        last = &ic_first_dev;
        rtnl_lock();
 
-       /* bring loopback device up first */
+       /* bring loopback and DSA master network devices up first */
        for_each_netdev(&init_net, dev) {
-               if (!(dev->flags & IFF_LOOPBACK))
+               if (!(dev->flags & IFF_LOOPBACK) && !netdev_uses_dsa(dev))
                        continue;
                if (dev_change_flags(dev, dev->flags | IFF_UP) < 0)
                        pr_err("IP-Config: Failed to open %s\n", dev->name);
@@ -306,7 +306,7 @@ static void __init ic_close_devs(void)
        while ((d = next)) {
                next = d->next;
                dev = d->dev;
-               if (dev != ic_dev) {
+               if (dev != ic_dev && !netdev_uses_dsa(dev)) {
                        DBG(("IP-Config: Downing %s\n", dev->name));
                        dev_change_flags(dev, d->flags);
                }
index 40403114f00a115a6726e0a1b322aa01ca4db0b0..915d215a7d145028ced69ea6e17f2c0ff40d1c7f 100644 (file)
@@ -366,12 +366,12 @@ static bool ipip_netlink_encap_parms(struct nlattr *data[],
 
        if (data[IFLA_IPTUN_ENCAP_SPORT]) {
                ret = true;
-               ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+               ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
        }
 
        if (data[IFLA_IPTUN_ENCAP_DPORT]) {
                ret = true;
-               ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+               ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
        }
 
        return ret;
@@ -460,10 +460,10 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
        if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
                        tunnel->encap.type) ||
-           nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
-                       tunnel->encap.sport) ||
-           nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
-                       tunnel->encap.dport) ||
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
+                        tunnel->encap.sport) ||
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
+                        tunnel->encap.dport) ||
            nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
                        tunnel->encap.flags))
                goto nla_put_failure;
@@ -498,6 +498,7 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly = {
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipip_get_size,
        .fill_info      = ipip_fill_info,
+       .get_link_net   = ip_tunnel_get_link_net,
 };
 
 static struct xfrm_tunnel ipip_handler __read_mostly = {
index c8034587859d3ebdda204711a7939abe8f66a22d..9d78427652d23e33a46ab7ce2d4b6dbac1660781 100644 (file)
@@ -2290,7 +2290,8 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
        if (err < 0 && err != -ENOENT)
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
index ff2d23d8c87a16964e29f478640dccd5ca527a02..6ecfce63201a2753d4943813d3eccbb951f34d0b 100644 (file)
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
 
        memset(&mr, 0, sizeof(mr));
        if (priv->sreg_proto_min) {
-               mr.range[0].min.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               mr.range[0].max.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               mr.range[0].min.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               mr.range[0].max.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index c0d82f78d364fe5561f819f953e3eb48d2f134da..e9f66e1cda507cf2d5cb532958d23a89beeccaba 100644 (file)
@@ -599,18 +599,18 @@ int ping_getfrag(void *from, char *to,
        struct pingfakehdr *pfh = (struct pingfakehdr *)from;
 
        if (offset == 0) {
-               if (fraglen < sizeof(struct icmphdr))
+               fraglen -= sizeof(struct icmphdr);
+               if (fraglen < 0)
                        BUG();
-               if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr),
-                           pfh->iov, 0, fraglen - sizeof(struct icmphdr),
-                           &pfh->wcheck))
+               if (csum_and_copy_from_iter(to + sizeof(struct icmphdr),
+                           fraglen, &pfh->wcheck,
+                           &pfh->msg->msg_iter) != fraglen)
                        return -EFAULT;
        } else if (offset < sizeof(struct icmphdr)) {
                        BUG();
        } else {
-               if (csum_partial_copy_fromiovecend
-                               (to, pfh->iov, offset - sizeof(struct icmphdr),
-                                fraglen, &pfh->wcheck))
+               if (csum_and_copy_from_iter(to, fraglen, &pfh->wcheck,
+                                           &pfh->msg->msg_iter) != fraglen)
                        return -EFAULT;
        }
 
@@ -811,8 +811,7 @@ back_from_confirm:
        pfh.icmph.checksum = 0;
        pfh.icmph.un.echo.id = inet->inet_sport;
        pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
-       /* XXX: stripping const */
-       pfh.iov = (struct iovec *)msg->msg_iter.iov;
+       pfh.msg = msg;
        pfh.wcheck = 0;
        pfh.family = AF_INET;
 
@@ -966,8 +965,11 @@ bool ping_rcv(struct sk_buff *skb)
 
        sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
        if (sk != NULL) {
+               struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
                pr_debug("rcv on socket %p\n", sk);
-               ping_queue_rcv_skb(sk, skb_get(skb));
+               if (skb2)
+                       ping_queue_rcv_skb(sk, skb2);
                sock_put(sk);
                return true;
        }
index 8f9cd200ce20118f5e5af7d2faa115d0f19d57f0..d8953ef0770ca14bbb9c883a18354bcac321db7b 100644 (file)
@@ -292,6 +292,12 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND),
        SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT),
        SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND),
+       SNMP_MIB_ITEM("TCPACKSkippedSynRecv", LINUX_MIB_TCPACKSKIPPEDSYNRECV),
+       SNMP_MIB_ITEM("TCPACKSkippedPAWS", LINUX_MIB_TCPACKSKIPPEDPAWS),
+       SNMP_MIB_ITEM("TCPACKSkippedSeq", LINUX_MIB_TCPACKSKIPPEDSEQ),
+       SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2),
+       SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT),
+       SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
        SNMP_MIB_SENTINEL
 };
 
index 0bb68df5055d2d3f92cb06e829a997b44b512d65..f027a708b7e01029574535e20f7461cfa4b84190 100644 (file)
@@ -337,7 +337,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
 }
 
 static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
-                          void *from, size_t length,
+                          struct msghdr *msg, size_t length,
                           struct rtable **rtp,
                           unsigned int flags)
 {
@@ -382,7 +382,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 
        skb->transport_header = skb->network_header;
        err = -EFAULT;
-       if (memcpy_fromiovecend((void *)iph, from, 0, length))
+       if (memcpy_from_msg(iph, msg, length))
                goto error_free;
 
        iphlen = iph->ihl * 4;
@@ -625,8 +625,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 back_from_confirm:
 
        if (inet->hdrincl)
-               /* XXX: stripping const */
-               err = raw_send_hdrinc(sk, &fl4, (struct iovec *)msg->msg_iter.iov, len,
+               err = raw_send_hdrinc(sk, &fl4, msg, len,
                                      &rt, msg->msg_flags);
 
         else {
index 6a2155b02602b100c7ce3bbfda28a38090add7a4..ad5064362c5c7da56e9e22f1a012972bfff58e0e 100644 (file)
@@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        if (dst->dev->mtu < mtu)
                return;
 
+       if (rt->rt_pmtu && rt->rt_pmtu < mtu)
+               return;
+
        if (mtu < ip_rt_min_pmtu)
                mtu = ip_rt_min_pmtu;
 
@@ -1325,14 +1328,22 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
        return ret;
 }
 
-static DEFINE_SPINLOCK(rt_uncached_lock);
-static LIST_HEAD(rt_uncached_list);
+struct uncached_list {
+       spinlock_t              lock;
+       struct list_head        head;
+};
+
+static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
 
 static void rt_add_uncached_list(struct rtable *rt)
 {
-       spin_lock_bh(&rt_uncached_lock);
-       list_add_tail(&rt->rt_uncached, &rt_uncached_list);
-       spin_unlock_bh(&rt_uncached_lock);
+       struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
+
+       rt->rt_uncached_list = ul;
+
+       spin_lock_bh(&ul->lock);
+       list_add_tail(&rt->rt_uncached, &ul->head);
+       spin_unlock_bh(&ul->lock);
 }
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
@@ -1340,27 +1351,32 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
        struct rtable *rt = (struct rtable *) dst;
 
        if (!list_empty(&rt->rt_uncached)) {
-               spin_lock_bh(&rt_uncached_lock);
+               struct uncached_list *ul = rt->rt_uncached_list;
+
+               spin_lock_bh(&ul->lock);
                list_del(&rt->rt_uncached);
-               spin_unlock_bh(&rt_uncached_lock);
+               spin_unlock_bh(&ul->lock);
        }
 }
 
 void rt_flush_dev(struct net_device *dev)
 {
-       if (!list_empty(&rt_uncached_list)) {
-               struct net *net = dev_net(dev);
-               struct rtable *rt;
+       struct net *net = dev_net(dev);
+       struct rtable *rt;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
-               spin_lock_bh(&rt_uncached_lock);
-               list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
+               spin_lock_bh(&ul->lock);
+               list_for_each_entry(rt, &ul->head, rt_uncached) {
                        if (rt->dst.dev != dev)
                                continue;
                        rt->dst.dev = net->loopback_dev;
                        dev_hold(rt->dst.dev);
                        dev_put(dev);
                }
-               spin_unlock_bh(&rt_uncached_lock);
+               spin_unlock_bh(&ul->lock);
        }
 }
 
@@ -1554,11 +1570,10 @@ static int __mkroute_input(struct sk_buff *skb,
 
        do_cache = res->fi && !itag;
        if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
+           skb->protocol == htons(ETH_P_IP) &&
            (IN_DEV_SHARED_MEDIA(out_dev) ||
-            inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
-               flags |= RTCF_DOREDIRECT;
-               do_cache = false;
-       }
+            inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+               IPCB(skb)->flags |= IPSKB_DOREDIRECT;
 
        if (skb->protocol != htons(ETH_P_IP)) {
                /* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2318,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
        r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
        if (rt->rt_flags & RTCF_NOTIFY)
                r->rtm_flags |= RTM_F_NOTIFY;
+       if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
+               r->rtm_flags |= RTCF_DOREDIRECT;
 
        if (nla_put_be32(skb, RTA_DST, dst))
                goto nla_put_failure;
@@ -2377,7 +2394,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -2469,7 +2487,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        err = rt_fill_info(net, dst, src, &fl4, skb,
                           NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
                           RTM_NEWROUTE, 0, 0);
-       if (err <= 0)
+       if (err < 0)
                goto errout_free;
 
        err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
@@ -2717,6 +2735,7 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
 int __init ip_rt_init(void)
 {
        int rc = 0;
+       int cpu;
 
        ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
        if (!ip_idents)
@@ -2724,6 +2743,12 @@ int __init ip_rt_init(void)
 
        prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
 
+       for_each_possible_cpu(cpu) {
+               struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
+
+               INIT_LIST_HEAD(&ul->head);
+               spin_lock_init(&ul->lock);
+       }
 #ifdef CONFIG_IP_ROUTE_CLASSID
        ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
        if (!ip_rt_acct)
index e0ee384a448fb0e6eb5b957d98dbcb272ea97edb..82601a68cf905cd871905a522cd919c670817fc8 100644 (file)
@@ -728,6 +728,13 @@ static struct ctl_table ipv4_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
+       {
+               .procname       = "tcp_invalid_ratelimit",
+               .data           = &sysctl_tcp_invalid_ratelimit,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_ms_jiffies,
+       },
        {
                .procname       = "icmp_msgs_per_sec",
                .data           = &sysctl_icmp_msgs_per_sec,
index 3075723c729bc98edf3a15eb0d0fbe172c300bbc..9d72a0fcd9284425e088cef6e1b8c14e95950ca4 100644 (file)
@@ -1067,11 +1067,10 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t size)
 {
-       const struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int iovlen, flags, err, copied = 0;
-       int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
+       int flags, err, copied = 0;
+       int mss_now = 0, size_goal, copied_syn = 0;
        bool sg;
        long timeo;
 
@@ -1084,7 +1083,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        goto out;
                else if (err)
                        goto out_err;
-               offset = copied_syn;
        }
 
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -1118,8 +1116,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        mss_now = tcp_send_mss(sk, &size_goal, flags);
 
        /* Ok commence sending. */
-       iovlen = msg->msg_iter.nr_segs;
-       iov = msg->msg_iter.iov;
        copied = 0;
 
        err = -EPIPE;
@@ -1128,151 +1124,134 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        sg = !!(sk->sk_route_caps & NETIF_F_SG);
 
-       while (--iovlen >= 0) {
-               size_t seglen = iov->iov_len;
-               unsigned char __user *from = iov->iov_base;
+       while (iov_iter_count(&msg->msg_iter)) {
+               int copy = 0;
+               int max = size_goal;
 
-               iov++;
-               if (unlikely(offset > 0)) {  /* Skip bytes copied in SYN */
-                       if (offset >= seglen) {
-                               offset -= seglen;
-                               continue;
-                       }
-                       seglen -= offset;
-                       from += offset;
-                       offset = 0;
+               skb = tcp_write_queue_tail(sk);
+               if (tcp_send_head(sk)) {
+                       if (skb->ip_summed == CHECKSUM_NONE)
+                               max = mss_now;
+                       copy = max - skb->len;
                }
 
-               while (seglen > 0) {
-                       int copy = 0;
-                       int max = size_goal;
-
-                       skb = tcp_write_queue_tail(sk);
-                       if (tcp_send_head(sk)) {
-                               if (skb->ip_summed == CHECKSUM_NONE)
-                                       max = mss_now;
-                               copy = max - skb->len;
-                       }
-
-                       if (copy <= 0) {
+               if (copy <= 0) {
 new_segment:
-                               /* Allocate new segment. If the interface is SG,
-                                * allocate skb fitting to single page.
-                                */
-                               if (!sk_stream_memory_free(sk))
-                                       goto wait_for_sndbuf;
+                       /* Allocate new segment. If the interface is SG,
+                        * allocate skb fitting to single page.
+                        */
+                       if (!sk_stream_memory_free(sk))
+                               goto wait_for_sndbuf;
 
-                               skb = sk_stream_alloc_skb(sk,
-                                                         select_size(sk, sg),
-                                                         sk->sk_allocation);
-                               if (!skb)
-                                       goto wait_for_memory;
+                       skb = sk_stream_alloc_skb(sk,
+                                                 select_size(sk, sg),
+                                                 sk->sk_allocation);
+                       if (!skb)
+                               goto wait_for_memory;
 
-                               /*
-                                * Check whether we can use HW checksum.
-                                */
-                               if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
-                                       skb->ip_summed = CHECKSUM_PARTIAL;
+                       /*
+                        * Check whether we can use HW checksum.
+                        */
+                       if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
+                               skb->ip_summed = CHECKSUM_PARTIAL;
 
-                               skb_entail(sk, skb);
-                               copy = size_goal;
-                               max = size_goal;
+                       skb_entail(sk, skb);
+                       copy = size_goal;
+                       max = size_goal;
 
-                               /* All packets are restored as if they have
-                                * already been sent. skb_mstamp isn't set to
-                                * avoid wrong rtt estimation.
-                                */
-                               if (tp->repair)
-                                       TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
-                       }
+                       /* All packets are restored as if they have
+                        * already been sent. skb_mstamp isn't set to
+                        * avoid wrong rtt estimation.
+                        */
+                       if (tp->repair)
+                               TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
+               }
 
-                       /* Try to append data to the end of skb. */
-                       if (copy > seglen)
-                               copy = seglen;
-
-                       /* Where to copy to? */
-                       if (skb_availroom(skb) > 0) {
-                               /* We have some space in skb head. Superb! */
-                               copy = min_t(int, copy, skb_availroom(skb));
-                               err = skb_add_data_nocache(sk, skb, from, copy);
-                               if (err)
-                                       goto do_fault;
-                       } else {
-                               bool merge = true;
-                               int i = skb_shinfo(skb)->nr_frags;
-                               struct page_frag *pfrag = sk_page_frag(sk);
-
-                               if (!sk_page_frag_refill(sk, pfrag))
-                                       goto wait_for_memory;
-
-                               if (!skb_can_coalesce(skb, i, pfrag->page,
-                                                     pfrag->offset)) {
-                                       if (i == MAX_SKB_FRAGS || !sg) {
-                                               tcp_mark_push(tp, skb);
-                                               goto new_segment;
-                                       }
-                                       merge = false;
-                               }
+               /* Try to append data to the end of skb. */
+               if (copy > iov_iter_count(&msg->msg_iter))
+                       copy = iov_iter_count(&msg->msg_iter);
+
+               /* Where to copy to? */
+               if (skb_availroom(skb) > 0) {
+                       /* We have some space in skb head. Superb! */
+                       copy = min_t(int, copy, skb_availroom(skb));
+                       err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
+                       if (err)
+                               goto do_fault;
+               } else {
+                       bool merge = true;
+                       int i = skb_shinfo(skb)->nr_frags;
+                       struct page_frag *pfrag = sk_page_frag(sk);
+
+                       if (!sk_page_frag_refill(sk, pfrag))
+                               goto wait_for_memory;
 
-                               copy = min_t(int, copy, pfrag->size - pfrag->offset);
-
-                               if (!sk_wmem_schedule(sk, copy))
-                                       goto wait_for_memory;
-
-                               err = skb_copy_to_page_nocache(sk, from, skb,
-                                                              pfrag->page,
-                                                              pfrag->offset,
-                                                              copy);
-                               if (err)
-                                       goto do_error;
-
-                               /* Update the skb. */
-                               if (merge) {
-                                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
-                               } else {
-                                       skb_fill_page_desc(skb, i, pfrag->page,
-                                                          pfrag->offset, copy);
-                                       get_page(pfrag->page);
+                       if (!skb_can_coalesce(skb, i, pfrag->page,
+                                             pfrag->offset)) {
+                               if (i == MAX_SKB_FRAGS || !sg) {
+                                       tcp_mark_push(tp, skb);
+                                       goto new_segment;
                                }
-                               pfrag->offset += copy;
+                               merge = false;
                        }
 
-                       if (!copied)
-                               TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+                       copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
-                       tp->write_seq += copy;
-                       TCP_SKB_CB(skb)->end_seq += copy;
-                       tcp_skb_pcount_set(skb, 0);
+                       if (!sk_wmem_schedule(sk, copy))
+                               goto wait_for_memory;
 
-                       from += copy;
-                       copied += copy;
-                       if ((seglen -= copy) == 0 && iovlen == 0) {
-                               tcp_tx_timestamp(sk, skb);
-                               goto out;
+                       err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
+                                                      pfrag->page,
+                                                      pfrag->offset,
+                                                      copy);
+                       if (err)
+                               goto do_error;
+
+                       /* Update the skb. */
+                       if (merge) {
+                               skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+                       } else {
+                               skb_fill_page_desc(skb, i, pfrag->page,
+                                                  pfrag->offset, copy);
+                               get_page(pfrag->page);
                        }
+                       pfrag->offset += copy;
+               }
 
-                       if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
-                               continue;
+               if (!copied)
+                       TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+
+               tp->write_seq += copy;
+               TCP_SKB_CB(skb)->end_seq += copy;
+               tcp_skb_pcount_set(skb, 0);
+
+               copied += copy;
+               if (!iov_iter_count(&msg->msg_iter)) {
+                       tcp_tx_timestamp(sk, skb);
+                       goto out;
+               }
 
-                       if (forced_push(tp)) {
-                               tcp_mark_push(tp, skb);
-                               __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
-                       } else if (skb == tcp_send_head(sk))
-                               tcp_push_one(sk, mss_now);
+               if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
                        continue;
 
+               if (forced_push(tp)) {
+                       tcp_mark_push(tp, skb);
+                       __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
+               } else if (skb == tcp_send_head(sk))
+                       tcp_push_one(sk, mss_now);
+               continue;
+
 wait_for_sndbuf:
-                       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
-                       if (copied)
-                               tcp_push(sk, flags & ~MSG_MORE, mss_now,
-                                        TCP_NAGLE_PUSH, size_goal);
+               if (copied)
+                       tcp_push(sk, flags & ~MSG_MORE, mss_now,
+                                TCP_NAGLE_PUSH, size_goal);
 
-                       if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
-                               goto do_error;
+               if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+                       goto do_error;
 
-                       mss_now = tcp_send_mss(sk, &size_goal, flags);
-               }
+               mss_now = tcp_send_mss(sk, &size_goal, flags);
        }
 
 out:
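
Note on the conversion above: the copy helpers now take msg->msg_iter directly and advance it as they copy, so the old per-iovec bookkeeping (from, seglen, iovlen) disappears and progress is tracked solely by iov_iter_count(). A minimal sketch of that consumption pattern (dst and space_in_skb are hypothetical placeholders, not names from the patch):

	size_t want = min(iov_iter_count(&msg->msg_iter), space_in_skb);

	if (copy_from_iter(dst, want, &msg->msg_iter) != want)
		return -EFAULT;	/* short copy; iterator advanced by what was copied */
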
index bb395d46a3898136afe615c73ac311fd2832f6f1..c037644eafb7caadcb196b1c8b676bbc42abdb93 100644 (file)
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                tcp_slow_start(tp, acked);
        else {
                bictcp_update(ca, tp->snd_cwnd);
-               tcp_cong_avoid_ai(tp, ca->cnt);
+               tcp_cong_avoid_ai(tp, ca->cnt, 1);
        }
 }
 
index 27ead0dd16bc7e444e96781ff01b10c444678396..d694088214cd87fa50e413730402499610c546e6 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/gfp.h>
+#include <linux/jhash.h>
 #include <net/tcp.h>
 
 static DEFINE_SPINLOCK(tcp_cong_list_lock);
@@ -31,6 +32,34 @@ static struct tcp_congestion_ops *tcp_ca_find(const char *name)
        return NULL;
 }
 
+/* Must be called with rcu lock held */
+static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
+{
+       const struct tcp_congestion_ops *ca = tcp_ca_find(name);
+#ifdef CONFIG_MODULES
+       if (!ca && capable(CAP_NET_ADMIN)) {
+               rcu_read_unlock();
+               request_module("tcp_%s", name);
+               rcu_read_lock();
+               ca = tcp_ca_find(name);
+       }
+#endif
+       return ca;
+}
+
+/* Simple linear search, not much in here. */
+struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
+{
+       struct tcp_congestion_ops *e;
+
+       list_for_each_entry_rcu(e, &tcp_cong_list, list) {
+               if (e->key == key)
+                       return e;
+       }
+
+       return NULL;
+}
+
 /*
  * Attach new congestion control algorithm to the list
  * of available options.
@@ -45,9 +74,12 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
                return -EINVAL;
        }
 
+       ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));
+
        spin_lock(&tcp_cong_list_lock);
-       if (tcp_ca_find(ca->name)) {
-               pr_notice("%s already registered\n", ca->name);
+       if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
+               pr_notice("%s already registered or non-unique key\n",
+                         ca->name);
                ret = -EEXIST;
        } else {
                list_add_tail_rcu(&ca->list, &tcp_cong_list);
@@ -70,9 +102,50 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
        spin_lock(&tcp_cong_list_lock);
        list_del_rcu(&ca->list);
        spin_unlock(&tcp_cong_list_lock);
+
+       /* Wait for outstanding readers to complete before the
+        * module gets removed entirely.
+        *
+        * A try_module_get() should fail by now, as the module is in
+        * "going" state: no refs are held anymore and the module_exit()
+        * handler is being called.
+        */
+       synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
 
+u32 tcp_ca_get_key_by_name(const char *name)
+{
+       const struct tcp_congestion_ops *ca;
+       u32 key;
+
+       might_sleep();
+
+       rcu_read_lock();
+       ca = __tcp_ca_find_autoload(name);
+       key = ca ? ca->key : TCP_CA_UNSPEC;
+       rcu_read_unlock();
+
+       return key;
+}
+EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);
+
+char *tcp_ca_get_name_by_key(u32 key, char *buffer)
+{
+       const struct tcp_congestion_ops *ca;
+       char *ret = NULL;
+
+       rcu_read_lock();
+       ca = tcp_ca_find_key(key);
+       if (ca)
+               ret = strncpy(buffer, ca->name,
+                             TCP_CA_NAME_MAX);
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);
+
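+
These two helpers exist so a congestion-control algorithm can be referenced by a fixed-size u32 key (the jhash of its name, computed at registration time) instead of a string; this is what lets a route carry the algorithm in the new RTAX_CC_ALGO metric, e.g. via iproute2's "ip route ... congctl [lock] NAME" (assuming an iproute2 new enough to support it). A sketch of a round trip through the API:

	/* Illustrative only. */
	char buf[TCP_CA_NAME_MAX];
	u32 key = tcp_ca_get_key_by_name("cubic"); /* TCP_CA_UNSPEC if unknown */

	if (key != TCP_CA_UNSPEC && tcp_ca_get_name_by_key(key, buf))
		pr_debug("cc key %u -> %s\n", key, buf);
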
 /* Assign choice of congestion control. */
 void tcp_assign_congestion_control(struct sock *sk)
 {
@@ -107,6 +180,18 @@ void tcp_init_congestion_control(struct sock *sk)
                icsk->icsk_ca_ops->init(sk);
 }
 
+static void tcp_reinit_congestion_control(struct sock *sk,
+                                         const struct tcp_congestion_ops *ca)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+
+       tcp_cleanup_congestion_control(sk);
+       icsk->icsk_ca_ops = ca;
+
+       if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
+               icsk->icsk_ca_ops->init(sk);
+}
+
 /* Manage refcounts on socket close. */
 void tcp_cleanup_congestion_control(struct sock *sk)
 {
@@ -241,42 +326,26 @@ out:
 int tcp_set_congestion_control(struct sock *sk, const char *name)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
-       struct tcp_congestion_ops *ca;
+       const struct tcp_congestion_ops *ca;
        int err = 0;
 
-       rcu_read_lock();
-       ca = tcp_ca_find(name);
+       if (icsk->icsk_ca_dst_locked)
+               return -EPERM;
 
-       /* no change asking for existing value */
+       rcu_read_lock();
+       ca = __tcp_ca_find_autoload(name);
+       /* No change asking for existing value */
        if (ca == icsk->icsk_ca_ops)
                goto out;
-
-#ifdef CONFIG_MODULES
-       /* not found attempt to autoload module */
-       if (!ca && capable(CAP_NET_ADMIN)) {
-               rcu_read_unlock();
-               request_module("tcp_%s", name);
-               rcu_read_lock();
-               ca = tcp_ca_find(name);
-       }
-#endif
        if (!ca)
                err = -ENOENT;
-
        else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
                   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
                err = -EPERM;
-
        else if (!try_module_get(ca->owner))
                err = -EBUSY;
-
-       else {
-               tcp_cleanup_congestion_control(sk);
-               icsk->icsk_ca_ops = ca;
-
-               if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
-                       icsk->icsk_ca_ops->init(sk);
-       }
+       else
+               tcp_reinit_congestion_control(sk, ca);
  out:
        rcu_read_unlock();
        return err;
@@ -291,26 +360,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
  * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
  * returns the leftover acks to adjust cwnd in congestion avoidance mode.
  */
-void tcp_slow_start(struct tcp_sock *tp, u32 acked)
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
        u32 cwnd = tp->snd_cwnd + acked;
 
        if (cwnd > tp->snd_ssthresh)
                cwnd = tp->snd_ssthresh + 1;
+       acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+
+       return acked;
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
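
A worked example of the new return value (illustrative numbers): with snd_cwnd = 10, snd_ssthresh = 12 and acked = 5, cwnd = 15 is capped to ssthresh + 1 = 13, so slow start consumes 3 ACKed packets and returns the leftover 2 for the caller to feed into congestion avoidance:

	/* acked = 5, snd_cwnd = 10, snd_ssthresh = 12 */
	acked = tcp_slow_start(tp, acked);	/* snd_cwnd -> 13, returns 2 */
	if (acked)
		tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
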
 
-/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
+/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
+ * for every packet that was ACKed.
+ */
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 {
+       tp->snd_cwnd_cnt += acked;
        if (tp->snd_cwnd_cnt >= w) {
-               if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                       tp->snd_cwnd++;
-               tp->snd_cwnd_cnt = 0;
-       } else {
-               tp->snd_cwnd_cnt++;
+               u32 delta = tp->snd_cwnd_cnt / w;
+
+               tp->snd_cwnd_cnt -= delta * w;
+               tp->snd_cwnd += delta;
        }
+       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
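
The batched arithmetic above replaces the old one-increment-per-call behaviour; with w = 10, an existing snd_cwnd_cnt of 18 and acked = 7, the counter reaches 25, delta = 25 / 10 = 2, so snd_cwnd grows by 2 in a single call and snd_cwnd_cnt keeps the remainder 5 (numbers illustrative):

	tp->snd_cwnd_cnt += acked;	/* 18 + 7 = 25 */
	delta = tp->snd_cwnd_cnt / w;	/* 2 */
	tp->snd_cwnd_cnt -= delta * w;	/* 5 */
	tp->snd_cwnd += delta;
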
 
@@ -329,11 +404,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        /* In "safe" area, increase. */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
-               tcp_slow_start(tp, acked);
+       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+               acked = tcp_slow_start(tp, acked);
+               if (!acked)
+                       return;
+       }
        /* In dangerous area, increase slowly. */
-       else
-               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+       tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
index 6b6002416a73950d493661ea1459870f49917efc..4b276d1ed9807057986bd3b050e2e901bf1afec0 100644 (file)
@@ -93,9 +93,7 @@ struct bictcp {
        u32     epoch_start;    /* beginning of an epoch */
        u32     ack_cnt;        /* number of acks */
        u32     tcp_cwnd;       /* estimated tcp cwnd */
-#define ACK_RATIO_SHIFT        4
-#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
-       u16     delayed_ack;    /* estimate the ratio of Packets/ACKs << 4 */
+       u16     unused;
        u8      sample_cnt;     /* number of samples to decide curr_rtt */
        u8      found;          /* the exit point is found? */
        u32     round_start;    /* beginning of each round */
@@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca)
        ca->bic_K = 0;
        ca->delay_min = 0;
        ca->epoch_start = 0;
-       ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
        ca->ack_cnt = 0;
        ca->tcp_cwnd = 0;
        ca->found = 0;
@@ -205,23 +202,30 @@ static u32 cubic_root(u64 a)
 /*
  * Compute congestion window to use.
  */
-static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
 {
        u32 delta, bic_target, max_cnt;
        u64 offs, t;
 
-       ca->ack_cnt++;  /* count the number of ACKs */
+       ca->ack_cnt += acked;   /* count the number of ACKed packets */
 
        if (ca->last_cwnd == cwnd &&
            (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
                return;
 
+       /* The CUBIC function can update ca->cnt at most once per jiffy.
+        * On all cwnd reduction events, ca->epoch_start is set to 0,
+        * which will force a recalculation of ca->cnt.
+        */
+       if (ca->epoch_start && tcp_time_stamp == ca->last_time)
+               goto tcp_friendliness;
+
        ca->last_cwnd = cwnd;
        ca->last_time = tcp_time_stamp;
 
        if (ca->epoch_start == 0) {
                ca->epoch_start = tcp_time_stamp;       /* record beginning */
-               ca->ack_cnt = 1;                        /* start counting */
+               ca->ack_cnt = acked;                    /* start counting */
                ca->tcp_cwnd = cwnd;                    /* syn with cubic */
 
                if (ca->last_max_cwnd <= cwnd) {
@@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
        if (ca->last_max_cwnd == 0 && ca->cnt > 20)
                ca->cnt = 20;   /* increase cwnd 5% per RTT */
 
+tcp_friendliness:
        /* TCP Friendly */
        if (tcp_friendliness) {
                u32 scale = beta_scale;
@@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
                }
        }
 
-       ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
        if (ca->cnt == 0)                       /* cannot be zero */
                ca->cnt = 1;
 }
@@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (tp->snd_cwnd <= tp->snd_ssthresh) {
                if (hystart && after(ack, ca->end_seq))
                        bictcp_hystart_reset(sk);
-               tcp_slow_start(tp, acked);
-       } else {
-               bictcp_update(ca, tp->snd_cwnd);
-               tcp_cong_avoid_ai(tp, ca->cnt);
+               acked = tcp_slow_start(tp, acked);
+               if (!acked)
+                       return;
        }
+       bictcp_update(ca, tp->snd_cwnd, acked);
+       tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
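
Net effect of the hunk above, with a worked trace (illustrative): when a stretch ACK covers several segments near ssthresh, the leftover now flows into CUBIC's increase path in the same call, and both functions are told the batch size instead of relying on the removed delayed_ack ratio estimate:

	/* e.g. acked = 3, two packets consumed by slow start: */
	acked = tcp_slow_start(tp, 3);		/* returns 1 */
	bictcp_update(ca, tp->snd_cwnd, 1);	/* ack_cnt += 1 */
	tcp_cong_avoid_ai(tp, ca->cnt, 1);
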
 
 static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay)
  */
 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
-       const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        u32 delay;
 
-       if (icsk->icsk_ca_state == TCP_CA_Open) {
-               u32 ratio = ca->delayed_ack;
-
-               ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
-               ratio += cnt;
-
-               ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
-       }
-
        /* Some calls are for duplicates without timestamps */
        if (rtt_us < 0)
                return;
index 075ab4d5af5e46e7a5a177a324228df592cbe5e3..8fdd27b173061def484663beeace691a8bfa2365 100644 (file)
@@ -100,6 +100,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_early_retrans __read_mostly = 3;
+int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 
 #define FLAG_DATA              0x01 /* Incoming frame contained data.          */
 #define FLAG_WIN_UPDATE                0x02 /* Incoming ACK was a window update.       */
@@ -3183,8 +3184,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
                tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 
-               if (ca_ops->pkts_acked)
-                       ca_ops->pkts_acked(sk, pkts_acked, ca_seq_rtt_us);
+               if (ca_ops->pkts_acked) {
+                       long rtt_us = min_t(ulong, ca_seq_rtt_us, sack_rtt_us);
+                       ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
+               }
 
        } else if (skb && rtt_update && sack_rtt_us >= 0 &&
                   sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
@@ -3319,13 +3322,22 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
 }
 
 /* RFC 5961 7 [ACK Throttling] */
-static void tcp_send_challenge_ack(struct sock *sk)
+static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 {
        /* unprotected vars, we don't care about overwrites */
        static u32 challenge_timestamp;
        static unsigned int challenge_count;
-       u32 now = jiffies / HZ;
+       struct tcp_sock *tp = tcp_sk(sk);
+       u32 now;
+
+       /* First check our per-socket dupack rate limit. */
+       if (tcp_oow_rate_limited(sock_net(sk), skb,
+                                LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+                                &tp->last_oow_ack_time))
+               return;
 
+       /* Then check the host-wide RFC 5961 rate limit. */
+       now = jiffies / HZ;
        if (now != challenge_timestamp) {
                challenge_timestamp = now;
                challenge_count = 0;
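
The tcp_oow_rate_limited() helper called above is introduced elsewhere in this series; a minimal sketch of the behaviour these call sites rely on, assuming it stamps the last out-of-window ACK time in tcp_time_stamp units and honours the new sysctl_tcp_invalid_ratelimit:

	/* Sketch only; the real helper lives elsewhere in tcp_input.c. */
	static bool oow_rate_limited_sketch(u32 *last_oow_ack_time)
	{
		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);

		if (*last_oow_ack_time && elapsed >= 0 &&
		    elapsed < sysctl_tcp_invalid_ratelimit)
			return true;	/* suppress this dupack/challenge ACK */

		*last_oow_ack_time = tcp_time_stamp;
		return false;
	}
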
@@ -3358,34 +3370,34 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 }
 
 /* This routine deals with acks during a TLP episode.
+ * We mark the end of a TLP episode on receiving a TLP dupack or when
+ * the ACK is after tlp_high_seq.
  * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
  */
 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       bool is_tlp_dupack = (ack == tp->tlp_high_seq) &&
-                            !(flag & (FLAG_SND_UNA_ADVANCED |
-                                      FLAG_NOT_DUP | FLAG_DATA_SACKED));
 
-       /* Mark the end of TLP episode on receiving TLP dupack or when
-        * ack is after tlp_high_seq.
-        */
-       if (is_tlp_dupack) {
-               tp->tlp_high_seq = 0;
+       if (before(ack, tp->tlp_high_seq))
                return;
-       }
 
-       if (after(ack, tp->tlp_high_seq)) {
+       if (flag & FLAG_DSACKING_ACK) {
+               /* This DSACK means original and TLP probe arrived; no loss */
+               tp->tlp_high_seq = 0;
+       } else if (after(ack, tp->tlp_high_seq)) {
+               /* ACK advances: there was a loss, so reduce cwnd. Reset
+                * tlp_high_seq in tcp_init_cwnd_reduction()
+                */
+               tcp_init_cwnd_reduction(sk);
+               tcp_set_ca_state(sk, TCP_CA_CWR);
+               tcp_end_cwnd_reduction(sk);
+               tcp_try_keep_open(sk);
+               NET_INC_STATS_BH(sock_net(sk),
+                                LINUX_MIB_TCPLOSSPROBERECOVERY);
+       } else if (!(flag & (FLAG_SND_UNA_ADVANCED |
+                            FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
+               /* Pure dupack: original and TLP probe arrived; no loss */
                tp->tlp_high_seq = 0;
-               /* Don't reduce cwnd if DSACK arrives for TLP retrans. */
-               if (!(flag & FLAG_DSACKING_ACK)) {
-                       tcp_init_cwnd_reduction(sk);
-                       tcp_set_ca_state(sk, TCP_CA_CWR);
-                       tcp_end_cwnd_reduction(sk);
-                       tcp_try_keep_open(sk);
-                       NET_INC_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_TCPLOSSPROBERECOVERY);
-               }
        }
 }
 
@@ -3421,7 +3433,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        if (before(ack, prior_snd_una)) {
                /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
                if (before(ack, prior_snd_una - tp->max_window)) {
-                       tcp_send_challenge_ack(sk);
+                       tcp_send_challenge_ack(sk, skb);
                        return -1;
                }
                goto old_ack;
@@ -4990,7 +5002,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
-                       tcp_send_dupack(sk, skb);
+                       if (!tcp_oow_rate_limited(sock_net(sk), skb,
+                                                 LINUX_MIB_TCPACKSKIPPEDPAWS,
+                                                 &tp->last_oow_ack_time))
+                               tcp_send_dupack(sk, skb);
                        goto discard;
                }
                /* Reset is accepted even if it did not pass PAWS. */
@@ -5007,7 +5022,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                if (!th->rst) {
                        if (th->syn)
                                goto syn_challenge;
-                       tcp_send_dupack(sk, skb);
+                       if (!tcp_oow_rate_limited(sock_net(sk), skb,
+                                                 LINUX_MIB_TCPACKSKIPPEDSEQ,
+                                                 &tp->last_oow_ack_time))
+                               tcp_send_dupack(sk, skb);
                }
                goto discard;
        }
@@ -5023,7 +5041,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
                        tcp_reset(sk);
                else
-                       tcp_send_challenge_ack(sk);
+                       tcp_send_challenge_ack(sk, skb);
                goto discard;
        }
 
@@ -5037,7 +5055,7 @@ syn_challenge:
                if (syn_inerr)
                        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
-               tcp_send_challenge_ack(sk);
+               tcp_send_challenge_ack(sk, skb);
                goto discard;
        }
 
@@ -5870,10 +5888,9 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
  * TCP ECN negotiation.
  *
  * Exception: tcp_ca wants ECN. This is required for DCTCP
- * congestion control; it requires setting ECT on all packets,
- * including SYN. We inverse the test in this case: If our
- * local socket wants ECN, but peer only set ece/cwr (but not
- * ECT in IP header) its probably a non-DCTCP aware sender.
+ * congestion control: Linux DCTCP asserts ECT on all packets,
+ * including SYN, which is the optimal solution; however, others,
+ * such as FreeBSD, do not.
  */
 static void tcp_ecn_create_request(struct request_sock *req,
                                   const struct sk_buff *skb,
@@ -5883,18 +5900,15 @@ static void tcp_ecn_create_request(struct request_sock *req,
        const struct tcphdr *th = tcp_hdr(skb);
        const struct net *net = sock_net(listen_sk);
        bool th_ecn = th->ece && th->cwr;
-       bool ect, need_ecn, ecn_ok;
+       bool ect, ecn_ok;
 
        if (!th_ecn)
                return;
 
        ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
-       need_ecn = tcp_ca_needs_ecn(listen_sk);
        ecn_ok = net->ipv4.sysctl_tcp_ecn || dst_feature(dst, RTAX_FEATURE_ECN);
 
-       if (!ect && !need_ecn && ecn_ok)
-               inet_rsk(req)->ecn_ok = 1;
-       else if (ect && need_ecn)
+       if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk))
                inet_rsk(req)->ecn_ok = 1;
 }
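
Spelled out, the collapsed condition sets ecn_ok in two cases (paraphrase of the patch, not new logic):

	/* Equivalent decision:
	 *   RFC 3168 peer:  !ect && ecn_ok        -> ecn_ok = 1
	 *   DCTCP listener: tcp_ca_needs_ecn(sk)  -> ecn_ok = 1
	 * A DCTCP peer sets ECT even on the SYN; other stacks do not,
	 * so the listener's own need for ECN overrides the ECT test.
	 */
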
 
index a3f72d7fc06c07c43e1c00b67970eaee074e4593..67bc95fb5d9e236c4ad58eb09c674e4cad01ab23 100644 (file)
@@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
                arg.bound_dev_if = sk->sk_bound_dev_if;
 
        arg.tos = ip_hdr(skb)->tos;
-       ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+       ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+                             skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
 
@@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
-       ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+       ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+                             skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
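
Both reply paths above now pull a CPU-local control socket instead of the single shared per-netns one (the percpu table is set up in tcp_sk_init() further down), removing contention when many RSTs and timewait ACKs are generated. The access pattern, safe here because the code runs in softirq context on the local CPU:

	struct sock *ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
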
 
@@ -1340,6 +1342,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        }
        sk_setup_caps(newsk, dst);
 
+       tcp_ca_openreq_child(newsk, dst);
+
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        if (tcp_sk(sk)->rx_opt.user_mss &&
@@ -2428,14 +2432,39 @@ struct proto tcp_prot = {
 };
 EXPORT_SYMBOL(tcp_prot);
 
+static void __net_exit tcp_sk_exit(struct net *net)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
+       free_percpu(net->ipv4.tcp_sk);
+}
+
 static int __net_init tcp_sk_init(struct net *net)
 {
+       int res, cpu;
+
+       net->ipv4.tcp_sk = alloc_percpu(struct sock *);
+       if (!net->ipv4.tcp_sk)
+               return -ENOMEM;
+
+       for_each_possible_cpu(cpu) {
+               struct sock *sk;
+
+               res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
+                                          IPPROTO_TCP, net);
+               if (res)
+                       goto fail;
+               *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+       }
        net->ipv4.sysctl_tcp_ecn = 2;
        return 0;
-}
 
-static void __net_exit tcp_sk_exit(struct net *net)
-{
+fail:
+       tcp_sk_exit(net);
+
+       return res;
 }
 
 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
index ed9c9a91851ced8e719aeec58530fb326ec8a2db..e5f41bd5ec1bcfe88199ec077f1558917b1be61b 100644 (file)
@@ -886,7 +886,8 @@ static int tcp_metrics_dump_info(struct sk_buff *skb,
        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;
 
-       return genlmsg_end(skb, hdr);
+       genlmsg_end(skb, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(skb, hdr);
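
Background for this and the many similar conversions below: nlmsg_end()/genlmsg_end() no longer return the message length in this series, so fill functions return 0 on success and every dump loop switches its error test from <= 0 to < 0. The resulting pattern:

	nlmsg_end(skb, nlh);
	return 0;	/* callers now treat only err < 0 as failure */
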
index 63d2680b65db36c93737f8c72df66263dfde06bf..dd11ac7798c626d9abe3fbada06fadc99eafe378 100644 (file)
@@ -58,6 +58,25 @@ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
        return seq == e_win && seq == end_seq;
 }
 
+static enum tcp_tw_status
+tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
+                                 const struct sk_buff *skb, int mib_idx)
+{
+       struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+
+       if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
+                                 &tcptw->tw_last_oow_ack_time)) {
+               /* Send ACK. Note, we do not put the bucket,
+                * it will be released by caller.
+                */
+               return TCP_TW_ACK;
+       }
+
+       /* We are rate-limiting, so just release the tw sock and drop skb. */
+       inet_twsk_put(tw);
+       return TCP_TW_SUCCESS;
+}
+
 /*
  * * Main purpose of TIME-WAIT state is to close connection gracefully,
  *   when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
@@ -116,7 +135,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tcptw->tw_rcv_nxt,
                                   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
-                       return TCP_TW_ACK;
+                       return tcp_timewait_check_oow_rate_limit(
+                               tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
 
                if (th->rst)
                        goto kill;
@@ -250,10 +270,8 @@ kill:
                        inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
                                           TCP_TIMEWAIT_LEN);
 
-               /* Send ACK. Note, we do not put the bucket,
-                * it will be released by caller.
-                */
-               return TCP_TW_ACK;
+               return tcp_timewait_check_oow_rate_limit(
+                       tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
@@ -289,6 +307,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset     = tp->tsoffset;
+               tcptw->tw_last_oow_ack_time = 0;
 
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
@@ -399,6 +418,32 @@ static void tcp_ecn_openreq_child(struct tcp_sock *tp,
        tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
 }
 
+void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
+       bool ca_got_dst = false;
+
+       if (ca_key != TCP_CA_UNSPEC) {
+               const struct tcp_congestion_ops *ca;
+
+               rcu_read_lock();
+               ca = tcp_ca_find_key(ca_key);
+               if (likely(ca && try_module_get(ca->owner))) {
+                       icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
+                       icsk->icsk_ca_ops = ca;
+                       ca_got_dst = true;
+               }
+               rcu_read_unlock();
+       }
+
+       if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+               tcp_assign_congestion_control(sk);
+
+       tcp_set_ca_state(sk, TCP_CA_Open);
+}
+EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
+
 /* This is not only more efficient than what we used to do, it eliminates
  * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
  *
@@ -441,6 +486,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                tcp_enable_early_retrans(newtp);
                newtp->tlp_high_seq = 0;
                newtp->lsndtime = treq->snt_synack;
+               newtp->last_oow_ack_time = 0;
                newtp->total_retrans = req->num_retrans;
 
                /* So many TCP implementations out there (incorrectly) count the
@@ -451,10 +497,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                newtp->snd_cwnd = TCP_INIT_CWND;
                newtp->snd_cwnd_cnt = 0;
 
-               if (!try_module_get(newicsk->icsk_ca_ops->owner))
-                       tcp_assign_congestion_control(newsk);
-
-               tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                __skb_queue_head_init(&newtp->out_of_order_queue);
                newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
@@ -583,7 +625,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                 * Reset timer after retransmitting SYNACK, similar to
                 * the idea of fast retransmit in recovery.
                 */
-               if (!inet_rtx_syn_ack(sk, req))
+               if (!tcp_oow_rate_limited(sock_net(sk), skb,
+                                         LINUX_MIB_TCPACKSKIPPEDSYNRECV,
+                                         &tcp_rsk(req)->last_oow_ack_time) &&
+                   !inet_rtx_syn_ack(sk, req))
                        req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
                                           TCP_RTO_MAX) + jiffies;
                return NULL;
index 7f18262e2326ac4d7963347d7458273a325caa64..4fcc9a7688499ed13cca430f2d2542c0f92136ee 100644 (file)
@@ -948,7 +948,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
        skb_orphan(skb);
        skb->sk = sk;
-       skb->destructor = tcp_wfree;
+       skb->destructor = skb_is_tcp_pure_ack(skb) ? sock_wfree : tcp_wfree;
        skb_set_hash_from_sk(skb, sk);
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
@@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
                        break;
 
-               if (tso_segs == 1) {
+               if (tso_segs == 1 || !max_segs) {
                        if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
                                                     (tcp_skb_is_last(sk, skb) ?
                                                      nonagle : TCP_NAGLE_PUSH))))
@@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                }
 
                limit = mss_now;
-               if (tso_segs > 1 && !tcp_urg_mode(tp))
+               if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
                        limit = tcp_mss_split_point(sk, skb, mss_now,
                                                    min_t(unsigned int,
                                                          cwnd_quota,
@@ -2939,6 +2939,25 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 }
 EXPORT_SYMBOL(tcp_make_synack);
 
+static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       const struct tcp_congestion_ops *ca;
+       u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
+
+       if (ca_key == TCP_CA_UNSPEC)
+               return;
+
+       rcu_read_lock();
+       ca = tcp_ca_find_key(ca_key);
+       if (likely(ca && try_module_get(ca->owner))) {
+               module_put(icsk->icsk_ca_ops->owner);
+               icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
+               icsk->icsk_ca_ops = ca;
+       }
+       rcu_read_unlock();
+}
+
 /* Do all connect socket setups that can be done AF independent. */
 static void tcp_connect_init(struct sock *sk)
 {
@@ -2964,6 +2983,8 @@ static void tcp_connect_init(struct sock *sk)
        tcp_mtup_init(sk);
        tcp_sync_mss(sk, dst_mtu(dst));
 
+       tcp_ca_dst_init(sk, dst);
+
        if (!tp->window_clamp)
                tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
        tp->advmss = dst_metric_advmss(dst);
@@ -3034,7 +3055,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_fastopen_request *fo = tp->fastopen_req;
-       int syn_loss = 0, space, err = 0;
+       int syn_loss = 0, space, err = 0, copied;
        unsigned long last_syn_loss = 0;
        struct sk_buff *syn_data;
 
@@ -3072,11 +3093,16 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
                goto fallback;
        syn_data->ip_summed = CHECKSUM_PARTIAL;
        memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
-       if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
-                                        fo->data->msg_iter.iov, 0, space))) {
+       copied = copy_from_iter(skb_put(syn_data, space), space,
+                               &fo->data->msg_iter);
+       if (unlikely(!copied)) {
                kfree_skb(syn_data);
                goto fallback;
        }
+       if (copied != space) {
+               skb_trim(syn_data, copied);
+               space = copied;
+       }
 
        /* No more data pending in inet_wait_for_connect() */
        if (space == fo->size)
@@ -3244,6 +3270,14 @@ void tcp_send_ack(struct sock *sk)
        skb_reserve(buff, MAX_TCP_HEADER);
        tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
+       /* We do not want pure acks influencing TCP Small Queues or fq/pacing
+        * too much.
+        * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
+        * We also avoid tcp_wfree() overhead (cache line miss accessing
+        * tp->tsq_flags) by using regular sock_wfree()
+        */
+       skb_set_tcp_pure_ack(buff);
+
        /* Send it off, this clears delayed acks for us. */
        skb_mstamp_get(&buff->skb_mstamp);
        tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
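
The skb_set_tcp_pure_ack()/skb_is_tcp_pure_ack() helpers are added in include/net/tcp.h as part of this change; a sketch of the trick, assuming the sentinel-truesize marking used there:

	/* Sketch: a pure ACK is tagged via a tiny sentinel truesize, cheap
	 * to test in tcp_transmit_skb() and ignored by fq/pacing accounting.
	 */
	static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
	{
		skb->truesize = 2;
	}

	static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
	{
		return skb->truesize == 2;
	}
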
index 6824afb65d9335532fe2bf61edce82cab8c3fd9c..333bcb2415ffca51e06f3042ae3d94b8e21c0725 100644 (file)
@@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (tp->snd_cwnd <= tp->snd_ssthresh)
                tcp_slow_start(tp, acked);
        else
-               tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
+               tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+                                 1);
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
index a4d2d2d88dcae7c00cf4db83d8b13ce6b143b3b4..112151eeee45bff0c37ac92d78d165ba92bd4d0a 100644 (file)
@@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                                /* In the "non-congestive state", increase cwnd
                                 *  every rtt.
                                 */
-                               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+                               tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
                        } else {
                                /* In the "congestive state", increase cwnd
                                 * every other rtt.
index cd72732185989b41d1a3f9167eacbf44c235cc01..17d35662930d054fb6fb379a2cddb9600e6b75e3 100644 (file)
@@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
        } else {
                /* Reno */
-               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+               tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
        }
 
        /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
index 13b4dcf86ef610d1fcc1b26f7f69f5a6bbd31686..97ef1f8b7be81ed7d06c599b4158db0507afde44 100644 (file)
@@ -1329,7 +1329,7 @@ try_again:
                *addr_len = sizeof(*sin);
        }
        if (inet->cmsg_flags)
-               ip_cmsg_recv(msg, skb);
+               ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
 
        err = copied;
        if (flags & MSG_TRUNC)
@@ -1806,7 +1806,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (sk != NULL) {
                int ret;
 
-               if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk))
+               if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
                        skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
                                                 inet_compute_pseudo);
 
index 7927db0a927951a20b4502d38fca0bba9e94862c..4a000f1dd75753833b792f6979bf697337f4dd7a 100644 (file)
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
        s_slot = cb->args[0];
        num = s_num = cb->args[1];
 
-       for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+       for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
                struct sock *sk;
                struct hlist_nulls_node *node;
                struct udp_hslot *hslot = &table->hash[slot];
 
+               num = 0;
+
                if (hlist_nulls_empty(&hslot->head))
                        continue;
 
index d3e537ef6b7f57143d272d2d085d31fdc81990e7..d10f6f4ead27918c3e7c31b754ec977518791d70 100644 (file)
@@ -339,7 +339,8 @@ unflush:
        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
        NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-       pp = uo_priv->offload->callbacks.gro_receive(head, skb);
+       pp = uo_priv->offload->callbacks.gro_receive(head, skb,
+                                                    uo_priv->offload);
 
 out_unlock:
        rcu_read_unlock();
@@ -395,7 +396,9 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
 
        if (uo_priv != NULL) {
                NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-               err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));
+               err = uo_priv->offload->callbacks.gro_complete(skb,
+                               nhoff + sizeof(struct udphdr),
+                               uo_priv->offload);
        }
 
        rcu_read_unlock();
index 1671263e5fa0eae2e6a7ebad40f912cae002bc53..c83b354850563ebc268a349bc25bc8b668435732 100644 (file)
@@ -63,7 +63,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
        inet_sk(sk)->mc_loop = 0;
 
        /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
-       udp_set_convert_csum(sk, true);
+       inet_inc_convert_csum(sk);
 
        rcu_assign_sk_user_data(sk, cfg->sk_user_data);
 
@@ -75,10 +75,10 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
 }
 EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
 
-int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
-                       struct sk_buff *skb, __be32 src, __be32 dst,
-                       __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
-                       __be16 dst_port, bool xnet)
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+                       __be32 src, __be32 dst, __u8 tos, __u8 ttl,
+                       __be16 df, __be16 src_port, __be16 dst_port,
+                       bool xnet, bool nocheck)
 {
        struct udphdr *uh;
 
@@ -90,9 +90,9 @@ int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
        uh->source = src_port;
        uh->len = htons(skb->len);
 
-       udp_set_csum(sock->sk->sk_no_check_tx, skb, src, dst, skb->len);
+       udp_set_csum(nocheck, skb, src, dst, skb->len);
 
-       return iptunnel_xmit(sock->sk, rt, skb, src, dst, IPPROTO_UDP,
+       return iptunnel_xmit(skb->sk, rt, skb, src, dst, IPPROTO_UDP,
                             tos, ttl, df, xnet);
 }
 EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
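
With the socket argument gone, callers pass the checksum policy explicitly via the new nocheck flag and the skb's own socket is used for transmit accounting; an illustrative call (variable names are placeholders, not from the patch):

	err = udp_tunnel_xmit_skb(rt, skb, local_ip, remote_ip, tos, ttl,
				  df, src_port, dst_port, xnet,
				  !csum_enabled);
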
index f7c8bbeb27b704c0106f714d5a0677c27d3346e0..8623118cb2bbc1e12837e732cd5a89ded8d53649 100644 (file)
@@ -201,6 +201,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .disable_ipv6           = 0,
        .accept_dad             = 1,
        .suppress_frag_ndisc    = 1,
+       .accept_ra_mtu          = 1,
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -238,6 +239,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .disable_ipv6           = 0,
        .accept_dad             = 1,
        .suppress_frag_ndisc    = 1,
+       .accept_ra_mtu          = 1,
 };
 
 /* Check if a valid qdisc is available */
@@ -489,7 +491,8 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
            nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -619,7 +622,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
                                                       cb->nlh->nlmsg_seq,
                                                       RTM_NEWNETCONF,
                                                       NLM_F_MULTI,
-                                                      -1) <= 0) {
+                                                      -1) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
@@ -635,7 +638,7 @@ cont:
                                               NETLINK_CB(cb->skb).portid,
                                               cb->nlh->nlmsg_seq,
                                               RTM_NEWNETCONF, NLM_F_MULTI,
-                                              -1) <= 0)
+                                              -1) < 0)
                        goto done;
                else
                        h++;
@@ -646,7 +649,7 @@ cont:
                                               NETLINK_CB(cb->skb).portid,
                                               cb->nlh->nlmsg_seq,
                                               RTM_NEWNETCONF, NLM_F_MULTI,
-                                              -1) <= 0)
+                                              -1) < 0)
                        goto done;
                else
                        h++;
@@ -4047,7 +4050,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
        if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
                goto error;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 error:
        nlmsg_cancel(skb, nlh);
@@ -4076,7 +4080,8 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
                return -EMSGSIZE;
        }
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
@@ -4101,7 +4106,8 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
                return -EMSGSIZE;
        }
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 enum addr_type_t {
@@ -4134,7 +4140,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                                                cb->nlh->nlmsg_seq,
                                                RTM_NEWADDR,
                                                NLM_F_MULTI);
-                       if (err <= 0)
+                       if (err < 0)
                                break;
                        nl_dump_check_consistent(cb, nlmsg_hdr(skb));
                }
@@ -4151,7 +4157,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                                                  cb->nlh->nlmsg_seq,
                                                  RTM_GETMULTICAST,
                                                  NLM_F_MULTI);
-                       if (err <= 0)
+                       if (err < 0)
                                break;
                }
                break;
@@ -4166,7 +4172,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                                                  cb->nlh->nlmsg_seq,
                                                  RTM_GETANYCAST,
                                                  NLM_F_MULTI);
-                       if (err <= 0)
+                       if (err < 0)
                                break;
                }
                break;
@@ -4209,7 +4215,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
                                goto cont;
 
                        if (in6_dump_addrs(idev, skb, cb, type,
-                                          s_ip_idx, &ip_idx) <= 0)
+                                          s_ip_idx, &ip_idx) < 0)
                                goto done;
 cont:
                        idx++;
@@ -4376,6 +4382,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
        array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
        array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
+       array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -4638,7 +4645,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
                goto nla_put_failure;
 
        nla_nest_end(skb, protoinfo);
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -4670,7 +4678,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        if (inet6_fill_ifinfo(skb, idev,
                                              NETLINK_CB(cb->skb).portid,
                                              cb->nlh->nlmsg_seq,
-                                             RTM_NEWLINK, NLM_F_MULTI) <= 0)
+                                             RTM_NEWLINK, NLM_F_MULTI) < 0)
                                goto out;
 cont:
                        idx++;
@@ -4747,7 +4755,8 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
        ci.valid_time = ntohl(pinfo->valid);
        if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -5252,6 +5261,13 @@ static struct addrconf_sysctl_table
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
+               {
+                       .procname       = "accept_ra_mtu",
+                       .data           = &ipv6_devconf.accept_ra_mtu,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+               },
                {
                        /* sentinel */
                }
@@ -5389,7 +5405,7 @@ static struct pernet_operations addrconf_ops = {
        .exit = addrconf_exit_net,
 };
 
-static struct rtnl_af_ops inet6_ops = {
+static struct rtnl_af_ops inet6_ops __read_mostly = {
        .family           = AF_INET6,
        .fill_link_af     = inet6_fill_link_af,
        .get_link_af_size = inet6_get_link_af_size,
index fd0dc47f471dad23566248fecadd571a1a4c00d9..e43e79d0a6124caa06cd2e2c09203ad2cd032c9e 100644 (file)
@@ -490,7 +490,8 @@ static int ip6addrlbl_fill(struct sk_buff *skb,
                return -EMSGSIZE;
        }
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -510,7 +511,7 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                              cb->nlh->nlmsg_seq,
                                              RTM_NEWADDRLABEL,
                                              NLM_F_MULTI);
-                       if (err <= 0)
+                       if (err < 0)
                                break;
                }
                idx++;
index 100c589a2a6cf951bdb8a7a2e56b087fb4fe56bf..c215be70cac08af78953ea860c5d67cf9d3fa642 100644 (file)
@@ -369,7 +369,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
        serr = SKB_EXT_ERR(skb);
 
-       if (sin) {
+       if (sin && skb->len) {
                const unsigned char *nh = skb_network_header(skb);
                sin->sin6_family = AF_INET6;
                sin->sin6_flowinfo = 0;
@@ -393,11 +393,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
        memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
        sin = &errhdr.offender;
-       sin->sin6_family = AF_UNSPEC;
-       if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
+       memset(sin, 0, sizeof(*sin));
+       if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) {
                sin->sin6_family = AF_INET6;
-               sin->sin6_flowinfo = 0;
-               sin->sin6_port = 0;
                if (np->rxopt.all) {
                        if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
                            serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
@@ -412,12 +410,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
                                ipv6_iface_scope_id(&sin->sin6_addr,
                                                    IP6CB(skb)->iif);
                } else {
-                       struct inet_sock *inet = inet_sk(sk);
-
                        ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
                                               &sin->sin6_addr);
-                       sin->sin6_scope_id = 0;
-                       if (inet->cmsg_flags)
+                       if (inet_sk(sk)->cmsg_flags)
                                ip_cmsg_recv(msg, skb);
                }
        }
index d674152b6ede5d6134f1a89cb8ebaa6a81308c73..a5e95199585ecb55bf63bbf02d2d34cacfbcca48 100644 (file)
@@ -427,7 +427,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
         *      Dest addr check
         */
 
-       if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
+       if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
                if (type != ICMPV6_PKT_TOOBIG &&
                    !(type == ICMPV6_PARAMPROB &&
                      code == ICMPV6_UNK_OPTION &&
index b2d1838897c933f6f1ceb2a6f64a2f67495ce3c9..263ef4143bff8fec124ab3087dde78164b5684f3 100644 (file)
@@ -277,7 +277,6 @@ static int fib6_dump_node(struct fib6_walker *w)
                        w->leaf = rt;
                        return 1;
                }
-               WARN_ON(res == 0);
        }
        w->leaf = NULL;
        return 0;
@@ -630,33 +629,59 @@ static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
               RTF_GATEWAY;
 }
 
-static int fib6_commit_metrics(struct dst_entry *dst,
-                              struct nlattr *mx, int mx_len)
+static void fib6_copy_metrics(u32 *mp, const struct mx6_config *mxc)
 {
-       struct nlattr *nla;
-       int remaining;
-       u32 *mp;
+       int i;
+
+       for (i = 0; i < RTAX_MAX; i++) {
+               if (test_bit(i, mxc->mx_valid))
+                       mp[i] = mxc->mx[i];
+       }
+}
+
+static int fib6_commit_metrics(struct dst_entry *dst, struct mx6_config *mxc)
+{
+       if (!mxc->mx)
+               return 0;
 
        if (dst->flags & DST_HOST) {
-               mp = dst_metrics_write_ptr(dst);
-       } else {
-               mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
-               if (!mp)
+               u32 *mp = dst_metrics_write_ptr(dst);
+
+               if (unlikely(!mp))
                        return -ENOMEM;
-               dst_init_metrics(dst, mp, 0);
-       }
 
-       nla_for_each_attr(nla, mx, mx_len, remaining) {
-               int type = nla_type(nla);
+               fib6_copy_metrics(mp, mxc);
+       } else {
+               dst_init_metrics(dst, mxc->mx, false);
 
-               if (type) {
-                       if (type > RTAX_MAX)
-                               return -EINVAL;
+               /* We've stolen mx now. */
+               mxc->mx = NULL;
+       }
 
-                       mp[type - 1] = nla_get_u32(nla);
+       return 0;
+}
+
+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
+                         struct net *net)
+{
+       if (atomic_read(&rt->rt6i_ref) != 1) {
+               /* This route is used as dummy address holder in some split
+                * nodes. It is not leaked, but it still holds other resources,
+                * which must be released in time. So, scan ascendant nodes
+                * and replace dummy references to this route with references
+                * to still alive ones.
+                */
+               while (fn) {
+                       if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+                               fn->leaf = fib6_find_prefix(net, fn);
+                               atomic_inc(&fn->leaf->rt6i_ref);
+                               rt6_release(rt);
+                       }
+                       fn = fn->parent;
                }
+               /* No more references are possible at this point. */
+               BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
        }
-       return 0;
 }
 
 /*
@@ -664,7 +689,7 @@ static int fib6_commit_metrics(struct dst_entry *dst,
  */
 
 static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
-                           struct nl_info *info, struct nlattr *mx, int mx_len)
+                           struct nl_info *info, struct mx6_config *mxc)
 {
        struct rt6_info *iter = NULL;
        struct rt6_info **ins;
@@ -773,11 +798,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                        pr_warn("NLM_F_CREATE should be set when creating new route\n");
 
 add:
-               if (mx) {
-                       err = fib6_commit_metrics(&rt->dst, mx, mx_len);
-                       if (err)
-                               return err;
-               }
+               err = fib6_commit_metrics(&rt->dst, mxc);
+               if (err)
+                       return err;
+
                rt->dst.rt6_next = iter;
                *ins = rt;
                rt->rt6i_node = fn;
@@ -797,21 +821,22 @@ add:
                        pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
                        return -ENOENT;
                }
-               if (mx) {
-                       err = fib6_commit_metrics(&rt->dst, mx, mx_len);
-                       if (err)
-                               return err;
-               }
+
+               err = fib6_commit_metrics(&rt->dst, mxc);
+               if (err)
+                       return err;
+
                *ins = rt;
                rt->rt6i_node = fn;
                rt->dst.rt6_next = iter->dst.rt6_next;
                atomic_inc(&rt->rt6i_ref);
                inet6_rt_notify(RTM_NEWROUTE, rt, info);
-               rt6_release(iter);
                if (!(fn->fn_flags & RTN_RTINFO)) {
                        info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                        fn->fn_flags |= RTN_RTINFO;
                }
+               fib6_purge_rt(iter, fn, info->nl_net);
+               rt6_release(iter);
        }
 
        return 0;
@@ -838,8 +863,8 @@ void fib6_force_start_gc(struct net *net)
  *     with source addr info in sub-trees
  */
 
-int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
-            struct nlattr *mx, int mx_len)
+int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+            struct nl_info *info, struct mx6_config *mxc)
 {
        struct fib6_node *fn, *pn = NULL;
        int err = -ENOMEM;
@@ -934,7 +959,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
        }
 #endif
 
-       err = fib6_add_rt2node(fn, rt, info, mx, mx_len);
+       err = fib6_add_rt2node(fn, rt, info, mxc);
        if (!err) {
                fib6_start_gc(info->nl_net, rt);
                if (!(rt->rt6i_flags & RTF_CACHE))
@@ -1322,24 +1347,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
                fn = fib6_repair_tree(net, fn);
        }
 
-       if (atomic_read(&rt->rt6i_ref) != 1) {
-               /* This route is used as dummy address holder in some split
-                * nodes. It is not leaked, but it still holds other resources,
-                * which must be released in time. So, scan ascendant nodes
-                * and replace dummy references to this route with references
-                * to still alive ones.
-                */
-               while (fn) {
-                       if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
-                               fn->leaf = fib6_find_prefix(net, fn);
-                               atomic_inc(&fn->leaf->rt6i_ref);
-                               rt6_release(rt);
-                       }
-                       fn = fn->parent;
-               }
-               /* No more references are possible at this point. */
-               BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
-       }
+       fib6_purge_rt(rt, fn, net);
 
        inet6_rt_notify(RTM_DELROUTE, rt, info);
        rt6_release(rt);
index 13cda4c6313bad7ecca5b20b5be7c8ac28bcf1ab..bc28b7d42a6dab05abee80d4fa84c102d92ca91f 100644 (file)
@@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (code == ICMPV6_HDR_FIELD)
                        teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
 
-               if (teli && teli == info - 2) {
+               if (teli && teli == be32_to_cpu(info) - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
                        if (tel->encap_limit == 0) {
                                net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
@@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                }
                break;
        case ICMPV6_PKT_TOOBIG:
-               mtu = info - offset;
+               mtu = be32_to_cpu(info) - offset;
                if (mtu < IPV6_MIN_MTU)
                        mtu = IPV6_MIN_MTU;
                t->dev->mtu = mtu;
@@ -1662,6 +1662,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
        .dellink        = ip6gre_dellink,
        .get_size       = ip6gre_get_size,
        .fill_info      = ip6gre_fill_info,
+       .get_link_net   = ip6_tnl_get_link_net,
 };
 
 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
@@ -1675,6 +1676,7 @@ static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
        .changelink     = ip6gre_changelink,
        .get_size       = ip6gre_get_size,
        .fill_info      = ip6gre_fill_info,
+       .get_link_net   = ip6_tnl_get_link_net,
 };
 
 /*
index ce69a12ae48c29276871dacb30eebea086b291a3..d33df4cbd8720fa6a86a065b8e439b0362ea4356 100644 (file)
@@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        skb_copy_secmark(to, from);
 }
 
-static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
-{
-       static u32 ip6_idents_hashrnd __read_mostly;
-       u32 hash, id;
-
-       net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
-
-       hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
-       hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
-
-       id = ip_idents_reserve(hash, 1);
-       fhdr->identification = htonl(id);
-}
-
 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
        struct sk_buff *frag;
@@ -1041,6 +1027,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
 static inline int ip6_ufo_append_data(struct sock *sk,
+                       struct sk_buff_head *queue,
                        int getfrag(void *from, char *to, int offset, int len,
                        int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
@@ -1056,7 +1043,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
         * device, so create one single skb packet containing complete
         * udp datagram
         */
-       skb = skb_peek_tail(&sk->sk_write_queue);
+       skb = skb_peek_tail(queue);
        if (skb == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
@@ -1079,7 +1066,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                skb->protocol = htons(ETH_P_IPV6);
                skb->csum = 0;
 
-               __skb_queue_tail(&sk->sk_write_queue, skb);
+               __skb_queue_tail(queue, skb);
        } else if (skb_is_gso(skb)) {
                goto append;
        }
@@ -1135,99 +1122,106 @@ static void ip6_append_data_mtu(unsigned int *mtu,
        }
 }
 
-int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
-       int offset, int len, int odd, struct sk_buff *skb),
-       void *from, int length, int transhdrlen,
-       int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
-       struct rt6_info *rt, unsigned int flags, int dontfrag)
+static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
+                         struct inet6_cork *v6_cork,
+                         int hlimit, int tclass, struct ipv6_txoptions *opt,
+                         struct rt6_info *rt, struct flowi6 *fl6)
 {
-       struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct inet_cork *cork;
+       unsigned int mtu;
+
+       /*
+        * setup for corking
+        */
+       if (opt) {
+               if (WARN_ON(v6_cork->opt))
+                       return -EINVAL;
+
+               v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
+               if (unlikely(v6_cork->opt == NULL))
+                       return -ENOBUFS;
+
+               v6_cork->opt->tot_len = opt->tot_len;
+               v6_cork->opt->opt_flen = opt->opt_flen;
+               v6_cork->opt->opt_nflen = opt->opt_nflen;
+
+               v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
+                                                   sk->sk_allocation);
+               if (opt->dst0opt && !v6_cork->opt->dst0opt)
+                       return -ENOBUFS;
+
+               v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
+                                                   sk->sk_allocation);
+               if (opt->dst1opt && !v6_cork->opt->dst1opt)
+                       return -ENOBUFS;
+
+               v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
+                                                  sk->sk_allocation);
+               if (opt->hopopt && !v6_cork->opt->hopopt)
+                       return -ENOBUFS;
+
+               v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
+                                                   sk->sk_allocation);
+               if (opt->srcrt && !v6_cork->opt->srcrt)
+                       return -ENOBUFS;
+
+               /* need source address above miyazawa*/
+       }
+       dst_hold(&rt->dst);
+       cork->base.dst = &rt->dst;
+       cork->fl.u.ip6 = *fl6;
+       v6_cork->hop_limit = hlimit;
+       v6_cork->tclass = tclass;
+       if (rt->dst.flags & DST_XFRM_TUNNEL)
+               mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+                     rt->dst.dev->mtu : dst_mtu(&rt->dst);
+       else
+               mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+                     rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+       if (np->frag_size < mtu) {
+               if (np->frag_size)
+                       mtu = np->frag_size;
+       }
+       cork->base.fragsize = mtu;
+       if (dst_allfrag(rt->dst.path))
+               cork->base.flags |= IPCORK_ALLFRAG;
+       cork->base.length = 0;
+
+       return 0;
+}
+
+static int __ip6_append_data(struct sock *sk,
+                            struct flowi6 *fl6,
+                            struct sk_buff_head *queue,
+                            struct inet_cork *cork,
+                            struct inet6_cork *v6_cork,
+                            struct page_frag *pfrag,
+                            int getfrag(void *from, char *to, int offset,
+                                        int len, int odd, struct sk_buff *skb),
+                            void *from, int length, int transhdrlen,
+                            unsigned int flags, int dontfrag)
+{
        struct sk_buff *skb, *skb_prev = NULL;
        unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
-       int exthdrlen;
-       int dst_exthdrlen;
+       int exthdrlen = 0;
+       int dst_exthdrlen = 0;
        int hh_len;
        int copy;
        int err;
        int offset = 0;
        __u8 tx_flags = 0;
        u32 tskey = 0;
+       struct rt6_info *rt = (struct rt6_info *)cork->dst;
+       struct ipv6_txoptions *opt = v6_cork->opt;
+       int csummode = CHECKSUM_NONE;
 
-       if (flags&MSG_PROBE)
-               return 0;
-       cork = &inet->cork.base;
-       if (skb_queue_empty(&sk->sk_write_queue)) {
-               /*
-                * setup for corking
-                */
-               if (opt) {
-                       if (WARN_ON(np->cork.opt))
-                               return -EINVAL;
-
-                       np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
-                       if (unlikely(np->cork.opt == NULL))
-                               return -ENOBUFS;
-
-                       np->cork.opt->tot_len = opt->tot_len;
-                       np->cork.opt->opt_flen = opt->opt_flen;
-                       np->cork.opt->opt_nflen = opt->opt_nflen;
-
-                       np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
-                                                           sk->sk_allocation);
-                       if (opt->dst0opt && !np->cork.opt->dst0opt)
-                               return -ENOBUFS;
-
-                       np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
-                                                           sk->sk_allocation);
-                       if (opt->dst1opt && !np->cork.opt->dst1opt)
-                               return -ENOBUFS;
-
-                       np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
-                                                          sk->sk_allocation);
-                       if (opt->hopopt && !np->cork.opt->hopopt)
-                               return -ENOBUFS;
-
-                       np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
-                                                           sk->sk_allocation);
-                       if (opt->srcrt && !np->cork.opt->srcrt)
-                               return -ENOBUFS;
-
-                       /* need source address above miyazawa*/
-               }
-               dst_hold(&rt->dst);
-               cork->dst = &rt->dst;
-               inet->cork.fl.u.ip6 = *fl6;
-               np->cork.hop_limit = hlimit;
-               np->cork.tclass = tclass;
-               if (rt->dst.flags & DST_XFRM_TUNNEL)
-                       mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
-                             rt->dst.dev->mtu : dst_mtu(&rt->dst);
-               else
-                       mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
-                             rt->dst.dev->mtu : dst_mtu(rt->dst.path);
-               if (np->frag_size < mtu) {
-                       if (np->frag_size)
-                               mtu = np->frag_size;
-               }
-               cork->fragsize = mtu;
-               if (dst_allfrag(rt->dst.path))
-                       cork->flags |= IPCORK_ALLFRAG;
-               cork->length = 0;
-               exthdrlen = (opt ? opt->opt_flen : 0);
-               length += exthdrlen;
-               transhdrlen += exthdrlen;
+       skb = skb_peek_tail(queue);
+       if (!skb) {
+               exthdrlen = opt ? opt->opt_flen : 0;
                dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
-       } else {
-               rt = (struct rt6_info *)cork->dst;
-               fl6 = &inet->cork.fl.u.ip6;
-               opt = np->cork.opt;
-               transhdrlen = 0;
-               exthdrlen = 0;
-               dst_exthdrlen = 0;
-               mtu = cork->fragsize;
        }
+
+       mtu = cork->fragsize;
        orig_mtu = mtu;
 
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -1276,6 +1270,14 @@ emsgsize:
                        tskey = sk->sk_tskey++;
        }
 
+       /* If this is the first and only packet and device
+        * supports checksum offloading, let's use it.
+        */
+       if (!skb &&
+           length + fragheaderlen < mtu &&
+           rt->dst.dev->features & NETIF_F_V6_CSUM &&
+           !exthdrlen)
+               csummode = CHECKSUM_PARTIAL;
        /*
         * Let's try using as much space as possible.
         * Use MTU if total length of the message fits into the MTU.
@@ -1292,13 +1294,12 @@ emsgsize:
         * --yoshfuji
         */
 
-       skb = skb_peek_tail(&sk->sk_write_queue);
        cork->length += length;
        if (((length > mtu) ||
             (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
-               err = ip6_ufo_append_data(sk, getfrag, from, length,
+               err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen,
                                          transhdrlen, mtu, flags, rt);
                if (err)
@@ -1389,7 +1390,7 @@ alloc_new_skb:
                         *      Fill in the control structures
                         */
                        skb->protocol = htons(ETH_P_IPV6);
-                       skb->ip_summed = CHECKSUM_NONE;
+                       skb->ip_summed = csummode;
                        skb->csum = 0;
                        /* reserve for fragmentation and ipsec header */
                        skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
@@ -1439,7 +1440,7 @@ alloc_new_skb:
                        /*
                         * Put the packet on the pending queue
                         */
-                       __skb_queue_tail(&sk->sk_write_queue, skb);
+                       __skb_queue_tail(queue, skb);
                        continue;
                }
 
@@ -1458,7 +1459,6 @@ alloc_new_skb:
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
-                       struct page_frag *pfrag = sk_page_frag(sk);
 
                        err = -ENOMEM;
                        if (!sk_page_frag_refill(sk, pfrag))
@@ -1501,43 +1501,81 @@ error:
        IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
        return err;
 }
+
+int ip6_append_data(struct sock *sk,
+                   int getfrag(void *from, char *to, int offset, int len,
+                               int odd, struct sk_buff *skb),
+                   void *from, int length, int transhdrlen, int hlimit,
+                   int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
+                   struct rt6_info *rt, unsigned int flags, int dontfrag)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       int exthdrlen;
+       int err;
+
+       if (flags&MSG_PROBE)
+               return 0;
+       if (skb_queue_empty(&sk->sk_write_queue)) {
+               /*
+                * setup for corking
+                */
+               err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
+                                    tclass, opt, rt, fl6);
+               if (err)
+                       return err;
+
+               exthdrlen = (opt ? opt->opt_flen : 0);
+               length += exthdrlen;
+               transhdrlen += exthdrlen;
+       } else {
+               fl6 = &inet->cork.fl.u.ip6;
+               transhdrlen = 0;
+       }
+
+       return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
+                                &np->cork, sk_page_frag(sk), getfrag,
+                                from, length, transhdrlen, flags, dontfrag);
+}
 EXPORT_SYMBOL_GPL(ip6_append_data);
 
-static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
+static void ip6_cork_release(struct inet_cork_full *cork,
+                            struct inet6_cork *v6_cork)
 {
-       if (np->cork.opt) {
-               kfree(np->cork.opt->dst0opt);
-               kfree(np->cork.opt->dst1opt);
-               kfree(np->cork.opt->hopopt);
-               kfree(np->cork.opt->srcrt);
-               kfree(np->cork.opt);
-               np->cork.opt = NULL;
+       if (v6_cork->opt) {
+               kfree(v6_cork->opt->dst0opt);
+               kfree(v6_cork->opt->dst1opt);
+               kfree(v6_cork->opt->hopopt);
+               kfree(v6_cork->opt->srcrt);
+               kfree(v6_cork->opt);
+               v6_cork->opt = NULL;
        }
 
-       if (inet->cork.base.dst) {
-               dst_release(inet->cork.base.dst);
-               inet->cork.base.dst = NULL;
-               inet->cork.base.flags &= ~IPCORK_ALLFRAG;
+       if (cork->base.dst) {
+               dst_release(cork->base.dst);
+               cork->base.dst = NULL;
+               cork->base.flags &= ~IPCORK_ALLFRAG;
        }
-       memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
+       memset(&cork->fl, 0, sizeof(cork->fl));
 }
 
-int ip6_push_pending_frames(struct sock *sk)
+struct sk_buff *__ip6_make_skb(struct sock *sk,
+                              struct sk_buff_head *queue,
+                              struct inet_cork_full *cork,
+                              struct inet6_cork *v6_cork)
 {
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
-       struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net *net = sock_net(sk);
        struct ipv6hdr *hdr;
-       struct ipv6_txoptions *opt = np->cork.opt;
-       struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
-       struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
+       struct ipv6_txoptions *opt = v6_cork->opt;
+       struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
+       struct flowi6 *fl6 = &cork->fl.u.ip6;
        unsigned char proto = fl6->flowi6_proto;
-       int err = 0;
 
-       skb = __skb_dequeue(&sk->sk_write_queue);
+       skb = __skb_dequeue(queue);
        if (skb == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);
@@ -1545,7 +1583,7 @@ int ip6_push_pending_frames(struct sock *sk)
        /* move skb->data to ip header from ext header */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
-       while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
+       while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
@@ -1570,10 +1608,10 @@ int ip6_push_pending_frames(struct sock *sk)
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);
 
-       ip6_flow_hdr(hdr, np->cork.tclass,
+       ip6_flow_hdr(hdr, v6_cork->tclass,
                     ip6_make_flowlabel(net, skb, fl6->flowlabel,
                                        np->autoflowlabel));
-       hdr->hop_limit = np->cork.hop_limit;
+       hdr->hop_limit = v6_cork->hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
        hdr->daddr = *final_dst;
@@ -1590,34 +1628,104 @@ int ip6_push_pending_frames(struct sock *sk)
                ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
        }
 
+       ip6_cork_release(cork, v6_cork);
+out:
+       return skb;
+}
+
+int ip6_send_skb(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+       int err;
+
        err = ip6_local_out(skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
                if (err)
-                       goto error;
+                       IP6_INC_STATS(net, rt->rt6i_idev,
+                                     IPSTATS_MIB_OUTDISCARDS);
        }
 
-out:
-       ip6_cork_release(inet, np);
        return err;
-error:
-       IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
-       goto out;
+}
+
+int ip6_push_pending_frames(struct sock *sk)
+{
+       struct sk_buff *skb;
+
+       skb = ip6_finish_skb(sk);
+       if (!skb)
+               return 0;
+
+       return ip6_send_skb(skb);
 }
 EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
 
-void ip6_flush_pending_frames(struct sock *sk)
+static void __ip6_flush_pending_frames(struct sock *sk,
+                                      struct sk_buff_head *queue,
+                                      struct inet_cork_full *cork,
+                                      struct inet6_cork *v6_cork)
 {
        struct sk_buff *skb;
 
-       while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
+       while ((skb = __skb_dequeue_tail(queue)) != NULL) {
                if (skb_dst(skb))
                        IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
                                      IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
        }
 
-       ip6_cork_release(inet_sk(sk), inet6_sk(sk));
+       ip6_cork_release(cork, v6_cork);
+}
+
+void ip6_flush_pending_frames(struct sock *sk)
+{
+       __ip6_flush_pending_frames(sk, &sk->sk_write_queue,
+                                  &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
 }
 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
+
+struct sk_buff *ip6_make_skb(struct sock *sk,
+                            int getfrag(void *from, char *to, int offset,
+                                        int len, int odd, struct sk_buff *skb),
+                            void *from, int length, int transhdrlen,
+                            int hlimit, int tclass,
+                            struct ipv6_txoptions *opt, struct flowi6 *fl6,
+                            struct rt6_info *rt, unsigned int flags,
+                            int dontfrag)
+{
+       struct inet_cork_full cork;
+       struct inet6_cork v6_cork;
+       struct sk_buff_head queue;
+       int exthdrlen = (opt ? opt->opt_flen : 0);
+       int err;
+
+       if (flags & MSG_PROBE)
+               return NULL;
+
+       __skb_queue_head_init(&queue);
+
+       cork.base.flags = 0;
+       cork.base.addr = 0;
+       cork.base.opt = NULL;
+       v6_cork.opt = NULL;
+       err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
+       if (err)
+               return ERR_PTR(err);
+
+       if (dontfrag < 0)
+               dontfrag = inet6_sk(sk)->dontfrag;
+
+       err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
+                               &current->task_frag, getfrag, from,
+                               length + exthdrlen, transhdrlen + exthdrlen,
+                               flags, dontfrag);
+       if (err) {
+               __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
+               return ERR_PTR(err);
+       }
+
+       return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
+}
index 92b3da571980670e4f343a278eec49521057be1e..266a264ec21273147b32d75ac7bde55afdd1b4cf 100644 (file)
@@ -1760,6 +1760,14 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+struct net *ip6_tnl_get_link_net(const struct net_device *dev)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+
+       return tunnel->net;
+}
+EXPORT_SYMBOL(ip6_tnl_get_link_net);
+
 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
        [IFLA_IPTUN_LINK]               = { .type = NLA_U32 },
        [IFLA_IPTUN_LOCAL]              = { .len = sizeof(struct in6_addr) },
@@ -1783,6 +1791,7 @@ static struct rtnl_link_ops ip6_link_ops __read_mostly = {
        .dellink        = ip6_tnl_dellink,
        .get_size       = ip6_tnl_get_size,
        .fill_info      = ip6_tnl_fill_info,
+       .get_link_net   = ip6_tnl_get_link_net,
 };
 
 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
index 8db6c98fe21858f4b3f630af277a0137e438aa8d..32d9b268e7d85faa6b33a3fa1a3f3fff9a36cad8 100644 (file)
@@ -62,14 +62,14 @@ error:
 }
 EXPORT_SYMBOL_GPL(udp_sock_create6);
 
-int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
-                        struct sk_buff *skb, struct net_device *dev,
-                        struct in6_addr *saddr, struct in6_addr *daddr,
-                        __u8 prio, __u8 ttl, __be16 src_port, __be16 dst_port)
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+                        struct net_device *dev, struct in6_addr *saddr,
+                        struct in6_addr *daddr,
+                        __u8 prio, __u8 ttl, __be16 src_port,
+                        __be16 dst_port, bool nocheck)
 {
        struct udphdr *uh;
        struct ipv6hdr *ip6h;
-       struct sock *sk = sock->sk;
 
        __skb_push(skb, sizeof(*uh));
        skb_reset_transport_header(skb);
@@ -85,7 +85,7 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
                            | IPSKB_REROUTED);
        skb_dst_set(skb, dst);
 
-       udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len);
+       udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
 
        __skb_push(skb, sizeof(*ip6h));
        skb_reset_network_header(skb);
index ace10d0b3aacb8e484e78f4f9a69113727ba3f65..5fb9e212eca8d0629bf9dc8b4677565c965b679f 100644 (file)
@@ -1016,6 +1016,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
        .changelink     = vti6_changelink,
        .get_size       = vti6_get_size,
        .fill_info      = vti6_fill_info,
+       .get_link_net   = ip6_tnl_get_link_net,
 };
 
 static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
index 722669754bbfb89ab04ea7677f1744a967c7e13e..34b682617f504359cecff4447c6015f90623e949 100644 (file)
@@ -2388,7 +2388,8 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        if (err < 0 && err != -ENOENT)
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
index 66980d8d98d1f5b3ef7a50dc33cb9b617f25604d..8d766d9100cba408525faf5818b7b0c6b6bc543c 100644 (file)
@@ -996,13 +996,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                lock_sock(sk);
                skb = np->pktoptions;
                if (skb)
-                       atomic_inc(&skb->users);
-               release_sock(sk);
-
-               if (skb) {
                        ip6_datagram_recv_ctl(sk, &msg, skb);
-                       kfree_skb(skb);
-               } else {
+               release_sock(sk);
+               if (!skb) {
                        if (np->rxopt.bits.rxinfo) {
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
index 682866777d5383dabaf6e3ae2f6f2deef261f557..8a9d7c19e247ceb58ce51aec5bfd684dfb4cb4ad 100644 (file)
@@ -1348,7 +1348,7 @@ skip_routeinfo:
                }
        }
 
-       if (ndopts.nd_opts_mtu) {
+       if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) {
                __be32 n;
                u32 mtu;
 
index 2433a6bfb191c4259bedfe2d697baa618c818b51..11820b6b36130d4ec83af3f173a1ca70df8cbf93 100644 (file)
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
 
        memset(&range, 0, sizeof(range));
        if (priv->sreg_proto_min) {
-               range.min_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               range.max_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               range.min_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               range.max_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 97f41a3e68d98b89d6b8f643780f8a883bd46f55..54520a0bd5e3b5feac3d4bc4221fac77ab990031 100644 (file)
@@ -9,6 +9,24 @@
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
+u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
+{
+       u32 hash, id;
+
+       hash = __ipv6_addr_jhash(dst, hashrnd);
+       hash = __ipv6_addr_jhash(src, hash);
+
+       /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
+        * set the hight order instead thus minimizing possible future
+        * collisions.
+        */
+       id = ip_idents_reserve(hash, 1);
+       if (unlikely(!id))
+               id = 1 << 31;
+
+       return id;
+}
+
 /* This function exists only for tap drivers that must support broken
  * clients requesting UFO without specifying an IPv6 fragment ID.
  *
@@ -22,7 +40,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
        static u32 ip6_proxy_idents_hashrnd __read_mostly;
        struct in6_addr buf[2];
        struct in6_addr *addrs;
-       u32 hash, id;
+       u32 id;
 
        addrs = skb_header_pointer(skb,
                                   skb_network_offset(skb) +
@@ -34,14 +52,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
        net_get_random_once(&ip6_proxy_idents_hashrnd,
                            sizeof(ip6_proxy_idents_hashrnd));
 
-       hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
-       hash = __ipv6_addr_jhash(&addrs[0], hash);
-
-       id = ip_idents_reserve(hash, 1);
-       skb_shinfo(skb)->ip6_frag_id = htonl(id);
+       id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+                                &addrs[1], &addrs[0]);
+       skb_shinfo(skb)->ip6_frag_id = id;
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
 
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+       static u32 ip6_idents_hashrnd __read_mostly;
+       u32 id;
+
+       net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+
+       id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
+                                &rt->rt6i_src.addr);
+       fhdr->identification = htonl(id);
+}
+EXPORT_SYMBOL(ipv6_select_ident);
+
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
        u16 offset = sizeof(struct ipv6hdr);
index 2d3148378a1f6a7315b135090bf1fed57f181a3b..bd46f736f61d74bcb75a4dabef264154f55a9fb0 100644 (file)
@@ -163,8 +163,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        pfh.icmph.checksum = 0;
        pfh.icmph.un.echo.id = inet->inet_sport;
        pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
-       /* XXX: stripping const */
-       pfh.iov = (struct iovec *)msg->msg_iter.iov;
+       pfh.msg = msg;
        pfh.wcheck = 0;
        pfh.family = AF_INET6;
 
index ee25631f8c293db3db95a0992fa2b319872afb30..dae7f1a1e46481d72e5b61da3cc9990d03c36380 100644 (file)
@@ -609,7 +609,7 @@ out:
        return err;
 }
 
-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
                        struct flowi6 *fl6, struct dst_entry **dstp,
                        unsigned int flags)
 {
@@ -648,7 +648,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
        skb->ip_summed = CHECKSUM_NONE;
 
        skb->transport_header = skb->network_header;
-       err = memcpy_fromiovecend((void *)iph, from, 0, length);
+       err = memcpy_from_msg(iph, msg, length);
        if (err)
                goto error_fault;
 
@@ -886,8 +886,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 
 back_from_confirm:
        if (inet->hdrincl)
-               /* XXX: stripping const */
-               err = rawv6_send_hdrinc(sk, (struct iovec *)msg->msg_iter.iov, len, &fl6, &dst, msg->msg_flags);
+               err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
        else {
                lock_sock(sk);
                err = ip6_append_data(sk, raw6_getfrag, &rfv,
index c91083156edbe2b631b1fb11e3ead35f1925b8a7..98565ce0ebcdff78da7c0f0c11737bbb8c2dee59 100644 (file)
@@ -499,7 +499,7 @@ static void rt6_probe_deferred(struct work_struct *w)
        addrconf_addr_solict_mult(&work->target, &mcaddr);
        ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
        dev_put(work->dev);
-       kfree(w);
+       kfree(work);
 }
 
 static void rt6_probe(struct rt6_info *rt)
@@ -853,14 +853,14 @@ EXPORT_SYMBOL(rt6_lookup);
  */
 
 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
-                       struct nlattr *mx, int mx_len)
+                       struct mx6_config *mxc)
 {
        int err;
        struct fib6_table *table;
 
        table = rt->rt6i_table;
        write_lock_bh(&table->tb6_lock);
-       err = fib6_add(&table->tb6_root, rt, info, mx, mx_len);
+       err = fib6_add(&table->tb6_root, rt, info, mxc);
        write_unlock_bh(&table->tb6_lock);
 
        return err;
@@ -868,10 +868,10 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
 
 int ip6_ins_rt(struct rt6_info *rt)
 {
-       struct nl_info info = {
-               .nl_net = dev_net(rt->dst.dev),
-       };
-       return __ip6_ins_rt(rt, &info, NULL, 0);
+       struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
+       struct mx6_config mxc = { .mx = NULL, };
+
+       return __ip6_ins_rt(rt, &info, &mxc);
 }
 
 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
@@ -1160,12 +1160,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                struct net *net = dev_net(dst->dev);
 
                rt6->rt6i_flags |= RTF_MODIFIED;
-               if (mtu < IPV6_MIN_MTU) {
-                       u32 features = dst_metric(dst, RTAX_FEATURES);
+               if (mtu < IPV6_MIN_MTU)
                        mtu = IPV6_MIN_MTU;
-                       features |= RTAX_FEATURE_ALLFRAG;
-                       dst_metric_set(dst, RTAX_FEATURES, features);
-               }
+
                dst_metric_set(dst, RTAX_MTU, mtu);
                rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
        }
@@ -1245,12 +1242,16 @@ restart:
                rt = net->ipv6.ip6_null_entry;
        else if (rt->dst.error) {
                rt = net->ipv6.ip6_null_entry;
-       } else if (rt == net->ipv6.ip6_null_entry) {
+               goto out;
+       }
+
+       if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
        }
 
+out:
        dst_hold(&rt->dst);
 
        read_unlock_bh(&table->tb6_lock);
@@ -1470,9 +1471,51 @@ out:
        return entries > rt_max_size;
 }
 
-/*
- *
- */
+static int ip6_convert_metrics(struct mx6_config *mxc,
+                              const struct fib6_config *cfg)
+{
+       struct nlattr *nla;
+       int remaining;
+       u32 *mp;
+
+       if (cfg->fc_mx == NULL)
+               return 0;
+
+       mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+       if (unlikely(!mp))
+               return -ENOMEM;
+
+       nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
+               int type = nla_type(nla);
+
+               if (type) {
+                       u32 val;
+
+                       if (unlikely(type > RTAX_MAX))
+                               goto err;
+                       if (type == RTAX_CC_ALGO) {
+                               char tmp[TCP_CA_NAME_MAX];
+
+                               nla_strlcpy(tmp, nla, sizeof(tmp));
+                               val = tcp_ca_get_key_by_name(tmp);
+                               if (val == TCP_CA_UNSPEC)
+                                       goto err;
+                       } else {
+                               val = nla_get_u32(nla);
+                       }
+
+                       mp[type - 1] = val;
+                       __set_bit(type - 1, mxc->mx_valid);
+               }
+       }
+
+       mxc->mx = mp;
+
+       return 0;
+ err:
+       kfree(mp);
+       return -EINVAL;
+}
 
 int ip6_route_add(struct fib6_config *cfg)
 {
@@ -1482,6 +1525,7 @@ int ip6_route_add(struct fib6_config *cfg)
        struct net_device *dev = NULL;
        struct inet6_dev *idev = NULL;
        struct fib6_table *table;
+       struct mx6_config mxc = { .mx = NULL, };
        int addr_type;
 
        if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
@@ -1677,8 +1721,14 @@ install_route:
 
        cfg->fc_nlinfo.nl_net = dev_net(dev);
 
-       return __ip6_ins_rt(rt, &cfg->fc_nlinfo, cfg->fc_mx, cfg->fc_mx_len);
+       err = ip6_convert_metrics(&mxc, cfg);
+       if (err)
+               goto out;
+
+       err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
 
+       kfree(mxc.mx);
+       return err;
 out:
        if (dev)
                dev_put(dev);
@@ -2534,7 +2584,8 @@ static inline size_t rt6_nlmsg_size(void)
               + nla_total_size(4) /* RTA_OIF */
               + nla_total_size(4) /* RTA_PRIORITY */
               + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
-              + nla_total_size(sizeof(struct rta_cacheinfo));
+              + nla_total_size(sizeof(struct rta_cacheinfo))
+              + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
 }
 
 static int rt6_fill_node(struct net *net,
@@ -2675,7 +2726,8 @@ static int rt6_fill_node(struct net *net,
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
index 213546bd6d5de5eeccfabe45c6dd4d575b475266..e4cbd5798eba0c9f393a3ca0857afa69ee666baf 100644 (file)
@@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[],
 
        if (data[IFLA_IPTUN_ENCAP_SPORT]) {
                ret = true;
-               ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+               ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
        }
 
        if (data[IFLA_IPTUN_ENCAP_DPORT]) {
                ret = true;
-               ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+               ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
        }
 
        return ret;
@@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
        if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
                        tunnel->encap.type) ||
-           nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
                        tunnel->encap.sport) ||
-           nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
                        tunnel->encap.dport) ||
            nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
                        tunnel->encap.flags))
@@ -1763,6 +1763,7 @@ static struct rtnl_link_ops sit_link_ops __read_mostly = {
        .get_size       = ipip6_get_size,
        .fill_info      = ipip6_fill_info,
        .dellink        = ipip6_dellink,
+       .get_link_net   = ip_tunnel_get_link_net,
 };
 
 static struct xfrm_tunnel sit_handler __read_mostly = {
index 9c0b54e87b472390c080857f886a2af4a7a300f8..5d46832c6f72b89a278a3326918a3c8bff9afed4 100644 (file)
@@ -1199,6 +1199,8 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);
 
+       tcp_ca_openreq_child(newsk, dst);
+
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        if (tcp_sk(sk)->rx_opt.user_mss &&
index 189dc4ae3ecac1b140a7208c4b6de0b956e0b710..d048d46779fc55407e9c45d123d6ba7dd6858782 100644 (file)
@@ -909,7 +909,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                        goto csum_error;
                }
 
-               if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk))
+               if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
                        skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
                                                 ip6_compute_pseudo);
 
@@ -990,9 +990,10 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
 {
        unsigned int offset;
        struct udphdr *uh = udp_hdr(skb);
+       struct sk_buff *frags = skb_shinfo(skb)->frag_list;
        __wsum csum = 0;
 
-       if (skb_queue_len(&sk->sk_write_queue) == 1) {
+       if (!frags) {
                /* Only one fragment on the socket.  */
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
@@ -1008,9 +1009,9 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
 
                skb->ip_summed = CHECKSUM_NONE;
 
-               skb_queue_walk(&sk->sk_write_queue, skb) {
-                       csum = csum_add(csum, skb->csum);
-               }
+               do {
+                       csum = csum_add(csum, frags->csum);
+               } while ((frags = frags->next));
 
                uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
                                            csum);
@@ -1023,26 +1024,15 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
  *     Sending
  */
 
-static int udp_v6_push_pending_frames(struct sock *sk)
+static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6)
 {
-       struct sk_buff *skb;
+       struct sock *sk = skb->sk;
        struct udphdr *uh;
-       struct udp_sock  *up = udp_sk(sk);
-       struct inet_sock *inet = inet_sk(sk);
-       struct flowi6 *fl6;
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);
        __wsum csum = 0;
-
-       if (up->pending == AF_INET)
-               return udp_push_pending_frames(sk);
-
-       fl6 = &inet->cork.fl.u.ip6;
-
-       /* Grab the skbuff where UDP header space exists. */
-       skb = skb_peek(&sk->sk_write_queue);
-       if (skb == NULL)
-               goto out;
+       int offset = skb_transport_offset(skb);
+       int len = skb->len - offset;
 
        /*
         * Create a UDP header
@@ -1050,29 +1040,28 @@ static int udp_v6_push_pending_frames(struct sock *sk)
        uh = udp_hdr(skb);
        uh->source = fl6->fl6_sport;
        uh->dest = fl6->fl6_dport;
-       uh->len = htons(up->len);
+       uh->len = htons(len);
        uh->check = 0;
 
        if (is_udplite)
-               csum = udplite_csum_outgoing(sk, skb);
-       else if (up->no_check6_tx) {   /* UDP csum disabled */
+               csum = udplite_csum(skb);
+       else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
                skb->ip_summed = CHECKSUM_NONE;
                goto send;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
-               udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
-                                    up->len);
+               udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
                goto send;
        } else
-               csum = udp_csum_outgoing(sk, skb);
+               csum = udp_csum(skb);
 
        /* add protocol-dependent pseudo-header */
        uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
-                                   up->len, fl6->flowi6_proto, csum);
+                                   len, fl6->flowi6_proto, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;
 
 send:
-       err = ip6_push_pending_frames(sk);
+       err = ip6_send_skb(skb);
        if (err) {
                if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
                        UDP6_INC_STATS_USER(sock_net(sk),
@@ -1082,6 +1071,30 @@ send:
        } else
                UDP6_INC_STATS_USER(sock_net(sk),
                                    UDP_MIB_OUTDATAGRAMS, is_udplite);
+       return err;
+}
+
+static int udp_v6_push_pending_frames(struct sock *sk)
+{
+       struct sk_buff *skb;
+       struct udp_sock  *up = udp_sk(sk);
+       struct flowi6 fl6;
+       int err = 0;
+
+       if (up->pending == AF_INET)
+               return udp_push_pending_frames(sk);
+
+       /* ip6_finish_skb will release the cork, so make a copy of
+        * fl6 here.
+        */
+       fl6 = inet_sk(sk)->cork.fl.u.ip6;
+
+       skb = ip6_finish_skb(sk);
+       if (!skb)
+               goto out;
+
+       err = udp_v6_send_skb(skb, &fl6);
+
 out:
        up->len = 0;
        up->pending = 0;
@@ -1164,6 +1177,7 @@ do_udp_sendmsg:
        if (len > INT_MAX - sizeof(struct udphdr))
                return -EMSGSIZE;
 
+       getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
        if (up->pending) {
                /*
                 * There are pending frames.
@@ -1294,6 +1308,20 @@ do_udp_sendmsg:
                goto do_confirm;
 back_from_confirm:
 
+       /* Lockless fast path for the non-corking case */
+       if (!corkreq) {
+               struct sk_buff *skb;
+
+               skb = ip6_make_skb(sk, getfrag, msg, ulen,
+                                  sizeof(struct udphdr), hlimit, tclass, opt,
+                                  &fl6, (struct rt6_info *)dst,
+                                  msg->msg_flags, dontfrag);
+               err = PTR_ERR(skb);
+               if (!IS_ERR_OR_NULL(skb))
+                       err = udp_v6_send_skb(skb, &fl6);
+               goto release_dst;
+       }
+
        lock_sock(sk);
        if (unlikely(up->pending)) {
                /* The socket is already corked while preparing it. */
@@ -1311,7 +1339,6 @@ do_append_data:
        if (dontfrag < 0)
                dontfrag = np->dontfrag;
        up->len += ulen;
-       getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
        err = ip6_append_data(sk, getfrag, msg, ulen,
                sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
                (struct rt6_info *)dst,
@@ -1323,6 +1350,11 @@ do_append_data:
        else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
                up->pending = 0;
 
+       if (err > 0)
+               err = np->recverr ? net_xmit_errno(err) : 0;
+       release_sock(sk);
+
+release_dst:
        if (dst) {
                if (connected) {
                        ip6_dst_store(sk, dst,
@@ -1339,9 +1371,6 @@ do_append_data:
                dst = NULL;
        }
 
-       if (err > 0)
-               err = np->recverr ? net_xmit_errno(err) : 0;
-       release_sock(sk);
 out:
        dst_release(dst);
        fl6_sock_release(flowlabel);
index b6aa8ed182579614d3738f107e839eb2e82e5371..a56276996b72b3f5d43121d6bf93422a58eefe62 100644 (file)
@@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
+               /* Set the IPv6 fragment id if not set yet */
+               if (!skb_shinfo(skb)->ip6_frag_id)
+                       ipv6_proxy_select_ident(skb);
+
                segs = NULL;
                goto out;
        }
@@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
                fptr->nexthdr = nexthdr;
                fptr->reserved = 0;
-               fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+               if (skb_shinfo(skb)->ip6_frag_id)
+                       fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+               else
+                       ipv6_select_ident(fptr,
+                                         (struct rt6_info *)skb_dst(skb));
 
                /* Fragment the skb. ipv6 header and the remaining fields of the
                 * fragment header are updated in ipv6_gso_segment()
index 5f983644373a230890b25189865af73f5e2b3b44..48bf5a06847bd59db7834758b22aa9208d727940 100644 (file)
@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
        struct flowi6 *fl6 = &fl->u.ip6;
        int onlyproto = 0;
-       u16 offset = skb_network_header_len(skb);
        const struct ipv6hdr *hdr = ipv6_hdr(skb);
+       u16 offset = sizeof(*hdr);
        struct ipv6_opt_hdr *exthdr;
        const unsigned char *nh = skb_network_header(skb);
-       u8 nexthdr = nh[IP6CB(skb)->nhoff];
+       u16 nhoff = IP6CB(skb)->nhoff;
        int oif = 0;
+       u8 nexthdr;
+
+       if (!nhoff)
+               nhoff = offsetof(struct ipv6hdr, nexthdr);
+
+       nexthdr = nh[nhoff];
 
        if (skb_dst(skb))
                oif = skb_dst(skb)->dev->ifindex;
index 7f2cafddfb6e774da73e2def256df22191507b4c..1cde711bcab550206b512938d825eaef71227efd 100644 (file)
@@ -533,7 +533,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
        info.discovery = discovery;
 
        /* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */
-       self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
+       self->slot_timeout = msecs_to_jiffies(sysctl_slot_timeout);
 
        irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
 }
@@ -1015,13 +1015,15 @@ void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
         * Or, this is how much we can keep the pf bit in primary mode.
         * Therefore, it must be lower or equal than our *OWN* max turn around.
         * Jean II */
-       self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
+       self->poll_timeout = msecs_to_jiffies(
+                               self->qos_tx.max_turn_time.value);
        /* The Final timeout applies only to the primary station.
         * It defines the maximum time the primary wait (mostly in RECV mode)
         * for an answer from the secondary station before polling it again.
         * Therefore, it must be greater or equal than our *PARTNER*
         * max turn around time - Jean II */
-       self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
+       self->final_timeout = msecs_to_jiffies(
+                               self->qos_rx.max_turn_time.value);
        /* The Watchdog Bit timeout applies only to the secondary station.
         * It defines the maximum time the secondary wait (mostly in RECV mode)
         * for poll from the primary station before getting annoyed.
index 6b16598f31d557a40fa4baa0ca13a0384f685800..b4e923f7795460736f48c73e9942efad46b8a285 100644 (file)
@@ -390,7 +390,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
        }
 
 out:
-       return genlmsg_end(skb, hdr);
+       genlmsg_end(skb, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(skb, hdr);
@@ -451,7 +452,7 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
 
                if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                       tunnel, L2TP_CMD_TUNNEL_GET) <= 0)
+                                       tunnel, L2TP_CMD_TUNNEL_GET) < 0)
                        goto out;
 
                ti++;
@@ -752,7 +753,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
                goto nla_put_failure;
        nla_nest_end(skb, nest);
 
-       return genlmsg_end(skb, hdr);
+       genlmsg_end(skb, hdr);
+       return 0;
 
  nla_put_failure:
        genlmsg_cancel(skb, hdr);
@@ -816,7 +818,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
 
                if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                        session, L2TP_CMD_SESSION_GET) <= 0)
+                                        session, L2TP_CMD_SESSION_GET) < 0)
                        break;
 
                si++;
index 612a5ddaf93b1ab1b5a524c5efef8d6b1f769038..799bafc2af39ea191e3753c88c3f921e44a77162 100644 (file)
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
        {
                .procname       = "ack",
                .data           = &sysctl_llc2_ack_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_ack_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "busy",
                .data           = &sysctl_llc2_busy_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_busy_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "p",
                .data           = &sysctl_llc2_p_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_p_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "rej",
                .data           = &sysctl_llc2_rej_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_rej_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
index 75cc6801a4316dbeb25958192f018b8808681096..64a012a0c6e52dba4d026701402e56c6e82f73b5 100644 (file)
@@ -5,6 +5,7 @@ config MAC80211
        select CRYPTO_ARC4
        select CRYPTO_AES
        select CRYPTO_CCM
+       select CRYPTO_GCM
        select CRC32
        select AVERAGE
        ---help---
index e53671b1105e039ad95fadc8c3f96fd090550cc3..3275f01881bee8a53a046e117873347fe04877c8 100644 (file)
@@ -15,7 +15,9 @@ mac80211-y := \
        michael.o \
        tkip.o \
        aes_ccm.o \
+       aes_gcm.o \
        aes_cmac.o \
+       aes_gmac.o \
        cfg.o \
        ethtool.o \
        rx.o \
index 09d9caaec59112f40b060951ae16796388e2e741..7869bb40acaa1acbe60763493a738bf32812bb34 100644 (file)
@@ -20,7 +20,8 @@
 #include "aes_ccm.h"
 
 void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-                              u8 *data, size_t data_len, u8 *mic)
+                              u8 *data, size_t data_len, u8 *mic,
+                              size_t mic_len)
 {
        struct scatterlist assoc, pt, ct[2];
 
@@ -35,7 +36,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
        sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
        sg_init_table(ct, 2);
        sg_set_buf(&ct[0], data, data_len);
-       sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
+       sg_set_buf(&ct[1], mic, mic_len);
 
        aead_request_set_tfm(aead_req, tfm);
        aead_request_set_assoc(aead_req, &assoc, assoc.length);
@@ -45,7 +46,8 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
 }
 
 int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-                             u8 *data, size_t data_len, u8 *mic)
+                             u8 *data, size_t data_len, u8 *mic,
+                             size_t mic_len)
 {
        struct scatterlist assoc, pt, ct[2];
        char aead_req_data[sizeof(struct aead_request) +
@@ -62,17 +64,18 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
        sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
        sg_init_table(ct, 2);
        sg_set_buf(&ct[0], data, data_len);
-       sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
+       sg_set_buf(&ct[1], mic, mic_len);
 
        aead_request_set_tfm(aead_req, tfm);
        aead_request_set_assoc(aead_req, &assoc, assoc.length);
-       aead_request_set_crypt(aead_req, ct, &pt,
-                              data_len + IEEE80211_CCMP_MIC_LEN, b_0);
+       aead_request_set_crypt(aead_req, ct, &pt, data_len + mic_len, b_0);
 
        return crypto_aead_decrypt(aead_req);
 }
 
-struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
+struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
+                                                   size_t key_len,
+                                                   size_t mic_len)
 {
        struct crypto_aead *tfm;
        int err;
@@ -81,9 +84,9 @@ struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
        if (IS_ERR(tfm))
                return tfm;
 
-       err = crypto_aead_setkey(tfm, key, WLAN_KEY_LEN_CCMP);
+       err = crypto_aead_setkey(tfm, key, key_len);
        if (!err)
-               err = crypto_aead_setauthsize(tfm, IEEE80211_CCMP_MIC_LEN);
+               err = crypto_aead_setauthsize(tfm, mic_len);
        if (!err)
                return tfm;
 
index 2c7ab1948a2edba3964a5c0edfb7e941752719f6..6a73d1e4d186d34a00da8c2f8e509985c3805607 100644 (file)
 
 #include <linux/crypto.h>
 
-struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[]);
+struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
+                                                   size_t key_len,
+                                                   size_t mic_len);
 void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-                              u8 *data, size_t data_len, u8 *mic);
+                              u8 *data, size_t data_len, u8 *mic,
+                              size_t mic_len);
 int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-                             u8 *data, size_t data_len, u8 *mic);
+                             u8 *data, size_t data_len, u8 *mic,
+                             size_t mic_len);
 void ieee80211_aes_key_free(struct crypto_aead *tfm);
 
 #endif /* AES_CCM_H */
index 9b9009f99551bb18b9613b4c470361d7fded4eb3..4192806be3d36884d22ce5830a43cae63d54745d 100644 (file)
@@ -18,8 +18,8 @@
 #include "key.h"
 #include "aes_cmac.h"
 
-#define AES_CMAC_KEY_LEN 16
 #define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
+#define CMAC_TLEN_256 16 /* CMAC TLen = 128 bits (16 octets) */
 #define AAD_LEN 20
 
 
@@ -35,9 +35,9 @@ static void gf_mulx(u8 *pad)
                pad[AES_BLOCK_SIZE - 1] ^= 0x87;
 }
 
-
-static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
-                               const u8 *addr[], const size_t *len, u8 *mac)
+static void aes_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
+                           const u8 *addr[], const size_t *len, u8 *mac,
+                           size_t mac_len)
 {
        u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
        const u8 *pos, *end;
@@ -88,7 +88,7 @@ static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
        for (i = 0; i < AES_BLOCK_SIZE; i++)
                pad[i] ^= cbc[i];
        crypto_cipher_encrypt_one(tfm, pad, pad);
-       memcpy(mac, pad, CMAC_TLEN);
+       memcpy(mac, pad, mac_len);
 }
 
 
@@ -107,17 +107,35 @@ void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
        addr[2] = zero;
        len[2] = CMAC_TLEN;
 
-       aes_128_cmac_vector(tfm, 3, addr, len, mic);
+       aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN);
 }
 
+void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad,
+                           const u8 *data, size_t data_len, u8 *mic)
+{
+       const u8 *addr[3];
+       size_t len[3];
+       u8 zero[CMAC_TLEN_256];
+
+       memset(zero, 0, CMAC_TLEN_256);
+       addr[0] = aad;
+       len[0] = AAD_LEN;
+       addr[1] = data;
+       len[1] = data_len - CMAC_TLEN_256;
+       addr[2] = zero;
+       len[2] = CMAC_TLEN_256;
+
+       aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN_256);
+}
 
-struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[])
+struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[],
+                                                  size_t key_len)
 {
        struct crypto_cipher *tfm;
 
        tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
        if (!IS_ERR(tfm))
-               crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
+               crypto_cipher_setkey(tfm, key, key_len);
 
        return tfm;
 }
index 0ce6487af79536c03e7cae25639eeacd389f5f18..3702041f44fdb16ce382c701eb53ef9493009f52 100644 (file)
 
 #include <linux/crypto.h>
 
-struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[]);
+struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[],
+                                                  size_t key_len);
 void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
                        const u8 *data, size_t data_len, u8 *mic);
+void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad,
+                           const u8 *data, size_t data_len, u8 *mic);
 void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm);
 
 #endif /* AES_CMAC_H */
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
new file mode 100644 (file)
index 0000000..c2bf669
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2014-2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/aes.h>
+
+#include <net/mac80211.h>
+#include "key.h"
+#include "aes_gcm.h"
+
+void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+                              u8 *data, size_t data_len, u8 *mic)
+{
+       struct scatterlist assoc, pt, ct[2];
+
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *)aead_req_data;
+
+       memset(aead_req, 0, sizeof(aead_req_data));
+
+       sg_init_one(&pt, data, data_len);
+       sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
+       sg_init_table(ct, 2);
+       sg_set_buf(&ct[0], data, data_len);
+       sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN);
+
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, &assoc, assoc.length);
+       aead_request_set_crypt(aead_req, &pt, ct, data_len, j_0);
+
+       crypto_aead_encrypt(aead_req);
+}
+
+int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+                             u8 *data, size_t data_len, u8 *mic)
+{
+       struct scatterlist assoc, pt, ct[2];
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *)aead_req_data;
+
+       if (data_len == 0)
+               return -EINVAL;
+
+       memset(aead_req, 0, sizeof(aead_req_data));
+
+       sg_init_one(&pt, data, data_len);
+       sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
+       sg_init_table(ct, 2);
+       sg_set_buf(&ct[0], data, data_len);
+       sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN);
+
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, &assoc, assoc.length);
+       aead_request_set_crypt(aead_req, ct, &pt,
+                              data_len + IEEE80211_GCMP_MIC_LEN, j_0);
+
+       return crypto_aead_decrypt(aead_req);
+}
+
+struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
+                                                       size_t key_len)
+{
+       struct crypto_aead *tfm;
+       int err;
+
+       tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
+               return tfm;
+
+       err = crypto_aead_setkey(tfm, key, key_len);
+       if (!err)
+               err = crypto_aead_setauthsize(tfm, IEEE80211_GCMP_MIC_LEN);
+       if (!err)
+               return tfm;
+
+       crypto_free_aead(tfm);
+       return ERR_PTR(err);
+}
+
+void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm)
+{
+       crypto_free_aead(tfm);
+}
diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h
new file mode 100644 (file)
index 0000000..1347fda
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2014-2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef AES_GCM_H
+#define AES_GCM_H
+
+#include <linux/crypto.h>
+
+void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+                              u8 *data, size_t data_len, u8 *mic);
+int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+                             u8 *data, size_t data_len, u8 *mic);
+struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
+                                                       size_t key_len);
+void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm);
+
+#endif /* AES_GCM_H */
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
new file mode 100644 (file)
index 0000000..1c72edc
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * AES-GMAC for IEEE 802.11 BIP-GMAC-128 and BIP-GMAC-256
+ * Copyright 2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/aes.h>
+
+#include <net/mac80211.h>
+#include "key.h"
+#include "aes_gmac.h"
+
+#define GMAC_MIC_LEN 16
+#define GMAC_NONCE_LEN 12
+#define AAD_LEN 20
+
+int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+                      const u8 *data, size_t data_len, u8 *mic)
+{
+       struct scatterlist sg[3], ct[1];
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *)aead_req_data;
+       u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
+
+       if (data_len < GMAC_MIC_LEN)
+               return -EINVAL;
+
+       memset(aead_req, 0, sizeof(aead_req_data));
+
+       memset(zero, 0, GMAC_MIC_LEN);
+       sg_init_table(sg, 3);
+       sg_set_buf(&sg[0], aad, AAD_LEN);
+       sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
+       sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
+
+       memcpy(iv, nonce, GMAC_NONCE_LEN);
+       memset(iv + GMAC_NONCE_LEN, 0, sizeof(iv) - GMAC_NONCE_LEN);
+       iv[AES_BLOCK_SIZE - 1] = 0x01;
+
+       sg_init_table(ct, 1);
+       sg_set_buf(&ct[0], mic, GMAC_MIC_LEN);
+
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, sg, AAD_LEN + data_len);
+       aead_request_set_crypt(aead_req, NULL, ct, 0, iv);
+
+       crypto_aead_encrypt(aead_req);
+
+       return 0;
+}
+
+struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
+                                                size_t key_len)
+{
+       struct crypto_aead *tfm;
+       int err;
+
+       tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
+               return tfm;
+
+       err = crypto_aead_setkey(tfm, key, key_len);
+       if (!err)
+               return tfm;
+       if (!err)
+               err = crypto_aead_setauthsize(tfm, GMAC_MIC_LEN);
+
+       crypto_free_aead(tfm);
+       return ERR_PTR(err);
+}
+
+void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm)
+{
+       crypto_free_aead(tfm);
+}
diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h
new file mode 100644 (file)
index 0000000..d328204
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2015, Qualcomm Atheros, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef AES_GMAC_H
+#define AES_GMAC_H
+
+#include <linux/crypto.h>
+
+struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
+                                                size_t key_len);
+int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+                      const u8 *data, size_t data_len, u8 *mic);
+void ieee80211_aes_gmac_key_free(struct crypto_aead *tfm);
+
+#endif /* AES_GMAC_H */
index ff090ef1ea2cdd57d85484b9f24ec4c801873e3e..dd4ff36c557a44158ef64cd18aa090600fec1faf 100644 (file)
@@ -162,8 +162,13 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
                        return -EINVAL;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
        case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
                break;
        default:
                cs = ieee80211_cs_get(local, params->cipher, sdata->vif.type);
@@ -348,6 +353,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
                params.seq_len = 6;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                pn64 = atomic64_read(&key->u.ccmp.tx_pn);
                seq[0] = pn64;
                seq[1] = pn64 >> 8;
@@ -359,6 +365,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
                params.seq_len = 6;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
                seq[0] = pn64;
                seq[1] = pn64 >> 8;
@@ -369,6 +376,30 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
                params.seq = seq;
                params.seq_len = 6;
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
+               seq[0] = pn64;
+               seq[1] = pn64 >> 8;
+               seq[2] = pn64 >> 16;
+               seq[3] = pn64 >> 24;
+               seq[4] = pn64 >> 32;
+               seq[5] = pn64 >> 40;
+               params.seq = seq;
+               params.seq_len = 6;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               pn64 = atomic64_read(&key->u.gcmp.tx_pn);
+               seq[0] = pn64;
+               seq[1] = pn64 >> 8;
+               seq[2] = pn64 >> 16;
+               seq[3] = pn64 >> 24;
+               seq[4] = pn64 >> 32;
+               seq[5] = pn64 >> 40;
+               params.seq = seq;
+               params.seq_len = 6;
+               break;
        }
 
        params.key = key->conf.key;
@@ -2110,6 +2141,8 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
        struct ieee80211_sub_if_data *sdata;
+       enum nl80211_tx_power_setting txp_type = type;
+       bool update_txp_type = false;
 
        if (wdev) {
                sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
@@ -2117,6 +2150,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
                switch (type) {
                case NL80211_TX_POWER_AUTOMATIC:
                        sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+                       txp_type = NL80211_TX_POWER_LIMITED;
                        break;
                case NL80211_TX_POWER_LIMITED:
                case NL80211_TX_POWER_FIXED:
@@ -2126,7 +2160,12 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
                        break;
                }
 
-               ieee80211_recalc_txpower(sdata);
+               if (txp_type != sdata->vif.bss_conf.txpower_type) {
+                       update_txp_type = true;
+                       sdata->vif.bss_conf.txpower_type = txp_type;
+               }
+
+               ieee80211_recalc_txpower(sdata, update_txp_type);
 
                return 0;
        }
@@ -2134,6 +2173,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
        switch (type) {
        case NL80211_TX_POWER_AUTOMATIC:
                local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
+               txp_type = NL80211_TX_POWER_LIMITED;
                break;
        case NL80211_TX_POWER_LIMITED:
        case NL80211_TX_POWER_FIXED:
@@ -2144,10 +2184,14 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
        }
 
        mutex_lock(&local->iflist_mtx);
-       list_for_each_entry(sdata, &local->interfaces, list)
+       list_for_each_entry(sdata, &local->interfaces, list) {
                sdata->user_power_level = local->user_power_level;
+               if (txp_type != sdata->vif.bss_conf.txpower_type)
+                       update_txp_type = true;
+               sdata->vif.bss_conf.txpower_type = txp_type;
+       }
        list_for_each_entry(sdata, &local->interfaces, list)
-               ieee80211_recalc_txpower(sdata);
+               ieee80211_recalc_txpower(sdata, update_txp_type);
        mutex_unlock(&local->iflist_mtx);
 
        return 0;
index 35b11e11e0c49ba81cb4ecdc9b3624e02628f503..ff0d2db09df9db467a5831606971e02f2fe6d410 100644 (file)
@@ -655,7 +655,7 @@ out:
        }
 
        if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
-               ieee80211_recalc_txpower(sdata);
+               ieee80211_recalc_txpower(sdata, false);
                ieee80211_recalc_chanctx_min_def(local, new_ctx);
        }
 
@@ -1387,7 +1387,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
                                ieee80211_bss_info_change_notify(sdata,
                                                                 changed);
 
-                       ieee80211_recalc_txpower(sdata);
+                       ieee80211_recalc_txpower(sdata, false);
                }
 
                ieee80211_recalc_chanctx_chantype(local, ctx);
index 5523b94c7c908f89e7d489903f486a99d738e7d3..71ac1b5f4da5632ab64893dfc2305c77a5a8b4d7 100644 (file)
@@ -94,17 +94,33 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
                                key->u.tkip.tx.iv16);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                pn = atomic64_read(&key->u.ccmp.tx_pn);
                len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
                                (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
                                (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                pn = atomic64_read(&key->u.aes_cmac.tx_pn);
                len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
                                (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
                                (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               pn = atomic64_read(&key->u.aes_gmac.tx_pn);
+               len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
+                               (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
+                               (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               pn = atomic64_read(&key->u.gcmp.tx_pn);
+               len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
+                               (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
+                               (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
+               break;
        default:
                return 0;
        }
@@ -134,6 +150,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
                len = p - buf;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
                        rpn = key->u.ccmp.rx_pn[i];
                        p += scnprintf(p, sizeof(buf)+buf-p,
@@ -144,6 +161,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
                len = p - buf;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                rpn = key->u.aes_cmac.rx_pn;
                p += scnprintf(p, sizeof(buf)+buf-p,
                               "%02x%02x%02x%02x%02x%02x\n",
@@ -151,6 +169,26 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
                               rpn[3], rpn[4], rpn[5]);
                len = p - buf;
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               rpn = key->u.aes_gmac.rx_pn;
+               p += scnprintf(p, sizeof(buf)+buf-p,
+                              "%02x%02x%02x%02x%02x%02x\n",
+                              rpn[0], rpn[1], rpn[2],
+                              rpn[3], rpn[4], rpn[5]);
+               len = p - buf;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
+                       rpn = key->u.gcmp.rx_pn[i];
+                       p += scnprintf(p, sizeof(buf)+buf-p,
+                                      "%02x%02x%02x%02x%02x%02x\n",
+                                      rpn[0], rpn[1], rpn[2],
+                                      rpn[3], rpn[4], rpn[5]);
+               }
+               len = p - buf;
+               break;
        default:
                return 0;
        }
@@ -167,12 +205,23 @@ static ssize_t key_replays_read(struct file *file, char __user *userbuf,
 
        switch (key->conf.cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                len = scnprintf(buf, sizeof(buf), "%u\n",
                                key->u.aes_cmac.replays);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               len = scnprintf(buf, sizeof(buf), "%u\n",
+                               key->u.aes_gmac.replays);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               len = scnprintf(buf, sizeof(buf), "%u\n", key->u.gcmp.replays);
+               break;
        default:
                return 0;
        }
@@ -189,9 +238,15 @@ static ssize_t key_icverrors_read(struct file *file, char __user *userbuf,
 
        switch (key->conf.cipher) {
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                len = scnprintf(buf, sizeof(buf), "%u\n",
                                key->u.aes_cmac.icverrors);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               len = scnprintf(buf, sizeof(buf), "%u\n",
+                               key->u.aes_gmac.icverrors);
+               break;
        default:
                return 0;
        }
index 156ea79e01579405de37480fdb0797768f675ecf..3afe36824703f49dcfe374e103b6372a9851b8aa 100644 (file)
@@ -1621,7 +1621,8 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local);
 void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
 
 bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
-void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
+void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
+                             bool update_bss);
 
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
@@ -1751,7 +1752,8 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
-       WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
+       WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+            !test_bit(SCAN_COMPLETED, &local->scanning),
                "%s: resume with hardware scan still in progress\n",
                wiphy_name(hw->wiphy));
 
@@ -1885,6 +1887,36 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
                              struct ieee80211_sub_if_data *sdata,
                              unsigned int queues, bool drop);
 
+static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
+{
+       /*
+        * If quiescing is set, we are racing with __ieee80211_suspend.
+        * __ieee80211_suspend flushes the workers after setting quiescing,
+        * and we check quiescing / suspended before enqueing new workers.
+        * We should abort the worker to avoid the races below.
+        */
+       if (local->quiescing)
+               return false;
+
+       /*
+        * We might already be suspended if the following scenario occurs:
+        * __ieee80211_suspend          Control path
+        *
+        *                              if (local->quiescing)
+        *                                      return;
+        * local->quiescing = true;
+        * flush_workqueue();
+        *                              queue_work(...);
+        * local->suspended = true;
+        * local->quiescing = false;
+        *                              worker starts running...
+        */
+       if (local->suspended)
+               return false;
+
+       return true;
+}
+
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                         u16 transaction, u16 auth_alg, u16 status,
                         const u8 *extra, size_t extra_len, const u8 *bssid,
index 677422e11e075137ce0c85bb6819ce0db17b187b..81a27516813e2f3473bec783ef9253b54967e62d 100644 (file)
@@ -73,9 +73,10 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
        return false;
 }
 
-void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
+void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
+                             bool update_bss)
 {
-       if (__ieee80211_recalc_txpower(sdata))
+       if (__ieee80211_recalc_txpower(sdata) || update_bss)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
 }
 
@@ -1169,12 +1170,7 @@ static void ieee80211_iface_work(struct work_struct *work)
        if (local->scanning)
                return;
 
-       /*
-        * ieee80211_queue_work() should have picked up most cases,
-        * here we'll pick the rest.
-        */
-       if (WARN(local->suspended,
-                "interface work scheduled while going to suspend\n"))
+       if (!ieee80211_can_run_worker(local))
                return;
 
        /* first process frames */
index f8d9f0ee59bf1b549ba18c4be160bbf3533e2249..0825d76edcfc81d93c4afa70ec81e157a7ecc2d2 100644 (file)
@@ -24,6 +24,8 @@
 #include "debugfs_key.h"
 #include "aes_ccm.h"
 #include "aes_cmac.h"
+#include "aes_gmac.h"
+#include "aes_gcm.h"
 
 
 /**
@@ -90,7 +92,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 {
        struct ieee80211_sub_if_data *sdata;
        struct sta_info *sta;
-       int ret;
+       int ret = -EOPNOTSUPP;
 
        might_sleep();
 
@@ -150,7 +152,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                return 0;
        }
 
-       if (ret != -ENOSPC && ret != -EOPNOTSUPP)
+       if (ret != -ENOSPC && ret != -EOPNOTSUPP && ret != 1)
                sdata_err(sdata,
                          "failed to set key (%d, %pM) to hardware (%d)\n",
                          key->conf.keyidx,
@@ -162,8 +164,18 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
        case WLAN_CIPHER_SUITE_WEP104:
        case WLAN_CIPHER_SUITE_TKIP:
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
        case WLAN_CIPHER_SUITE_AES_CMAC:
-               /* all of these we can do in software */
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               /* all of these we can do in software - if driver can */
+               if (ret == 1)
+                       return 0;
+               if (key->local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL)
+                       return -EINVAL;
                return 0;
        default:
                return -EINVAL;
@@ -382,7 +394,26 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                 * Initialize AES key state here as an optimization so that
                 * it does not need to be initialized for every packet.
                 */
-               key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data);
+               key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(
+                       key_data, key_len, IEEE80211_CCMP_MIC_LEN);
+               if (IS_ERR(key->u.ccmp.tfm)) {
+                       err = PTR_ERR(key->u.ccmp.tfm);
+                       kfree(key);
+                       return ERR_PTR(err);
+               }
+               break;
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               key->conf.iv_len = IEEE80211_CCMP_256_HDR_LEN;
+               key->conf.icv_len = IEEE80211_CCMP_256_MIC_LEN;
+               for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++)
+                       for (j = 0; j < IEEE80211_CCMP_256_PN_LEN; j++)
+                               key->u.ccmp.rx_pn[i][j] =
+                                       seq[IEEE80211_CCMP_256_PN_LEN - j - 1];
+               /* Initialize AES key state here as an optimization so that
+                * it does not need to be initialized for every packet.
+                */
+               key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(
+                       key_data, key_len, IEEE80211_CCMP_256_MIC_LEN);
                if (IS_ERR(key->u.ccmp.tfm)) {
                        err = PTR_ERR(key->u.ccmp.tfm);
                        kfree(key);
@@ -390,8 +421,12 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                }
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                key->conf.iv_len = 0;
-               key->conf.icv_len = sizeof(struct ieee80211_mmie);
+               if (cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+                       key->conf.icv_len = sizeof(struct ieee80211_mmie);
+               else
+                       key->conf.icv_len = sizeof(struct ieee80211_mmie_16);
                if (seq)
                        for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++)
                                key->u.aes_cmac.rx_pn[j] =
@@ -401,13 +436,51 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                 * it does not need to be initialized for every packet.
                 */
                key->u.aes_cmac.tfm =
-                       ieee80211_aes_cmac_key_setup(key_data);
+                       ieee80211_aes_cmac_key_setup(key_data, key_len);
                if (IS_ERR(key->u.aes_cmac.tfm)) {
                        err = PTR_ERR(key->u.aes_cmac.tfm);
                        kfree(key);
                        return ERR_PTR(err);
                }
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               key->conf.iv_len = 0;
+               key->conf.icv_len = sizeof(struct ieee80211_mmie_16);
+               if (seq)
+                       for (j = 0; j < IEEE80211_GMAC_PN_LEN; j++)
+                               key->u.aes_gmac.rx_pn[j] =
+                                       seq[IEEE80211_GMAC_PN_LEN - j - 1];
+               /* Initialize AES key state here as an optimization so that
+                * it does not need to be initialized for every packet.
+                */
+               key->u.aes_gmac.tfm =
+                       ieee80211_aes_gmac_key_setup(key_data, key_len);
+               if (IS_ERR(key->u.aes_gmac.tfm)) {
+                       err = PTR_ERR(key->u.aes_gmac.tfm);
+                       kfree(key);
+                       return ERR_PTR(err);
+               }
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               key->conf.iv_len = IEEE80211_GCMP_HDR_LEN;
+               key->conf.icv_len = IEEE80211_GCMP_MIC_LEN;
+               for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++)
+                       for (j = 0; j < IEEE80211_GCMP_PN_LEN; j++)
+                               key->u.gcmp.rx_pn[i][j] =
+                                       seq[IEEE80211_GCMP_PN_LEN - j - 1];
+               /* Initialize AES key state here as an optimization so that
+                * it does not need to be initialized for every packet.
+                */
+               key->u.gcmp.tfm = ieee80211_aes_gcm_key_setup_encrypt(key_data,
+                                                                     key_len);
+               if (IS_ERR(key->u.gcmp.tfm)) {
+                       err = PTR_ERR(key->u.gcmp.tfm);
+                       kfree(key);
+                       return ERR_PTR(err);
+               }
+               break;
        default:
                if (cs) {
                        size_t len = (seq_len > MAX_PN_LEN) ?
@@ -429,10 +502,24 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
 
 static void ieee80211_key_free_common(struct ieee80211_key *key)
 {
-       if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP)
+       switch (key->conf.cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                ieee80211_aes_key_free(key->u.ccmp.tfm);
-       if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               ieee80211_aes_gmac_key_free(key->u.aes_gmac.tfm);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               ieee80211_aes_gcm_key_free(key->u.gcmp.tfm);
+               break;
+       }
        kzfree(key);
 }
 
@@ -739,6 +826,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
                seq->tkip.iv16 = key->u.tkip.tx.iv16;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                pn64 = atomic64_read(&key->u.ccmp.tx_pn);
                seq->ccmp.pn[5] = pn64;
                seq->ccmp.pn[4] = pn64 >> 8;
@@ -748,6 +836,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
                seq->ccmp.pn[0] = pn64 >> 40;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
                seq->ccmp.pn[5] = pn64;
                seq->ccmp.pn[4] = pn64 >> 8;
@@ -756,6 +845,26 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
                seq->ccmp.pn[1] = pn64 >> 32;
                seq->ccmp.pn[0] = pn64 >> 40;
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
+               seq->ccmp.pn[5] = pn64;
+               seq->ccmp.pn[4] = pn64 >> 8;
+               seq->ccmp.pn[3] = pn64 >> 16;
+               seq->ccmp.pn[2] = pn64 >> 24;
+               seq->ccmp.pn[1] = pn64 >> 32;
+               seq->ccmp.pn[0] = pn64 >> 40;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               pn64 = atomic64_read(&key->u.gcmp.tx_pn);
+               seq->gcmp.pn[5] = pn64;
+               seq->gcmp.pn[4] = pn64 >> 8;
+               seq->gcmp.pn[3] = pn64 >> 16;
+               seq->gcmp.pn[2] = pn64 >> 24;
+               seq->gcmp.pn[1] = pn64 >> 32;
+               seq->gcmp.pn[0] = pn64 >> 40;
+               break;
        default:
                WARN_ON(1);
        }
@@ -778,6 +887,7 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
                seq->tkip.iv16 = key->u.tkip.rx[tid].iv16;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
                        return;
                if (tid < 0)
@@ -787,11 +897,29 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
                memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                if (WARN_ON(tid != 0))
                        return;
                pn = key->u.aes_cmac.rx_pn;
                memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               if (WARN_ON(tid != 0))
+                       return;
+               pn = key->u.aes_gmac.rx_pn;
+               memcpy(seq->aes_gmac.pn, pn, IEEE80211_GMAC_PN_LEN);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
+                       return;
+               if (tid < 0)
+                       pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS];
+               else
+                       pn = key->u.gcmp.rx_pn[tid];
+               memcpy(seq->gcmp.pn, pn, IEEE80211_GCMP_PN_LEN);
+               break;
        }
 }
 EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
@@ -810,6 +938,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
                key->u.tkip.tx.iv16 = seq->tkip.iv16;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                pn64 = (u64)seq->ccmp.pn[5] |
                       ((u64)seq->ccmp.pn[4] << 8) |
                       ((u64)seq->ccmp.pn[3] << 16) |
@@ -819,6 +948,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
                atomic64_set(&key->u.ccmp.tx_pn, pn64);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                pn64 = (u64)seq->aes_cmac.pn[5] |
                       ((u64)seq->aes_cmac.pn[4] << 8) |
                       ((u64)seq->aes_cmac.pn[3] << 16) |
@@ -827,6 +957,26 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
                       ((u64)seq->aes_cmac.pn[0] << 40);
                atomic64_set(&key->u.aes_cmac.tx_pn, pn64);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               pn64 = (u64)seq->aes_gmac.pn[5] |
+                      ((u64)seq->aes_gmac.pn[4] << 8) |
+                      ((u64)seq->aes_gmac.pn[3] << 16) |
+                      ((u64)seq->aes_gmac.pn[2] << 24) |
+                      ((u64)seq->aes_gmac.pn[1] << 32) |
+                      ((u64)seq->aes_gmac.pn[0] << 40);
+               atomic64_set(&key->u.aes_gmac.tx_pn, pn64);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               pn64 = (u64)seq->gcmp.pn[5] |
+                      ((u64)seq->gcmp.pn[4] << 8) |
+                      ((u64)seq->gcmp.pn[3] << 16) |
+                      ((u64)seq->gcmp.pn[2] << 24) |
+                      ((u64)seq->gcmp.pn[1] << 32) |
+                      ((u64)seq->gcmp.pn[0] << 40);
+               atomic64_set(&key->u.gcmp.tx_pn, pn64);
+               break;
        default:
                WARN_ON(1);
                break;
@@ -850,6 +1000,7 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
                key->u.tkip.rx[tid].iv16 = seq->tkip.iv16;
                break;
        case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
                if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
                        return;
                if (tid < 0)
@@ -859,11 +1010,29 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
                memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                if (WARN_ON(tid != 0))
                        return;
                pn = key->u.aes_cmac.rx_pn;
                memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN);
                break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               if (WARN_ON(tid != 0))
+                       return;
+               pn = key->u.aes_gmac.rx_pn;
+               memcpy(pn, seq->aes_gmac.pn, IEEE80211_GMAC_PN_LEN);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
+                       return;
+               if (tid < 0)
+                       pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS];
+               else
+                       pn = key->u.gcmp.rx_pn[tid];
+               memcpy(pn, seq->gcmp.pn, IEEE80211_GCMP_PN_LEN);
+               break;
        default:
                WARN_ON(1);
                break;
index 19db68663d7555461768eeae45a0afad8b0b163b..d57a9915494f94eb44bfbf3ed609286d8eb9907f 100644 (file)
@@ -94,6 +94,24 @@ struct ieee80211_key {
                        u32 replays; /* dot11RSNAStatsCMACReplays */
                        u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
                } aes_cmac;
+               struct {
+                       atomic64_t tx_pn;
+                       u8 rx_pn[IEEE80211_GMAC_PN_LEN];
+                       struct crypto_aead *tfm;
+                       u32 replays; /* dot11RSNAStatsCMACReplays */
+                       u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
+               } aes_gmac;
+               struct {
+                       atomic64_t tx_pn;
+                       /* Last received packet number. The first
+                        * IEEE80211_NUM_TIDS counters are used with Data
+                        * frames and the last counter is used with Robust
+                        * Management frames.
+                        */
+                       u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_GCMP_PN_LEN];
+                       struct crypto_aead *tfm;
+                       u32 replays; /* dot11RSNAStatsGCMPReplays */
+               } gcmp;
                struct {
                        /* generic cipher scheme */
                        u8 rx_pn[IEEE80211_NUM_TIDS + 1][MAX_PN_LEN];
index d9ce33663c736f732b18906f90aba0c82c1c25c5..5e09d354c5a52f25a373740cbd54b8dddaf841df 100644 (file)
@@ -658,7 +658,6 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
        bool have_wep = !(IS_ERR(local->wep_tx_tfm) ||
                          IS_ERR(local->wep_rx_tfm));
        bool have_mfp = local->hw.flags & IEEE80211_HW_MFP_CAPABLE;
-       const struct ieee80211_cipher_scheme *cs = local->hw.cipher_schemes;
        int n_suites = 0, r = 0, w = 0;
        u32 *suites;
        static const u32 cipher_suites[] = {
@@ -667,79 +666,109 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
                WLAN_CIPHER_SUITE_WEP104,
                WLAN_CIPHER_SUITE_TKIP,
                WLAN_CIPHER_SUITE_CCMP,
+               WLAN_CIPHER_SUITE_CCMP_256,
+               WLAN_CIPHER_SUITE_GCMP,
+               WLAN_CIPHER_SUITE_GCMP_256,
 
                /* keep last -- depends on hw flags! */
-               WLAN_CIPHER_SUITE_AES_CMAC
+               WLAN_CIPHER_SUITE_AES_CMAC,
+               WLAN_CIPHER_SUITE_BIP_CMAC_256,
+               WLAN_CIPHER_SUITE_BIP_GMAC_128,
+               WLAN_CIPHER_SUITE_BIP_GMAC_256,
        };
 
-       /* Driver specifies the ciphers, we have nothing to do... */
-       if (local->hw.wiphy->cipher_suites && have_wep)
-               return 0;
+       if (local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL ||
+           local->hw.wiphy->cipher_suites) {
+               /* If the driver advertises, or doesn't support SW crypto,
+                * we only need to remove WEP if necessary.
+                */
+               if (have_wep)
+                       return 0;
+
+               /* well if it has _no_ ciphers ... fine */
+               if (!local->hw.wiphy->n_cipher_suites)
+                       return 0;
+
+               /* Driver provides cipher suites, but we need to exclude WEP */
+               suites = kmemdup(local->hw.wiphy->cipher_suites,
+                                sizeof(u32) * local->hw.wiphy->n_cipher_suites,
+                                GFP_KERNEL);
+               if (!suites)
+                       return -ENOMEM;
+
+               for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
+                       u32 suite = local->hw.wiphy->cipher_suites[r];
 
-       /* Set up cipher suites if driver relies on mac80211 cipher defs */
-       if (!local->hw.wiphy->cipher_suites && !cs) {
+                       if (suite == WLAN_CIPHER_SUITE_WEP40 ||
+                           suite == WLAN_CIPHER_SUITE_WEP104)
+                               continue;
+                       suites[w++] = suite;
+               }
+       } else if (!local->hw.cipher_schemes) {
+               /* If the driver doesn't have cipher schemes, there's nothing
+                * else to do other than assign the (software supported and
+                * perhaps offloaded) cipher suites.
+                */
                local->hw.wiphy->cipher_suites = cipher_suites;
                local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
                if (!have_mfp)
-                       local->hw.wiphy->n_cipher_suites--;
+                       local->hw.wiphy->n_cipher_suites -= 4;
 
                if (!have_wep) {
                        local->hw.wiphy->cipher_suites += 2;
                        local->hw.wiphy->n_cipher_suites -= 2;
                }
 
+               /* not dynamically allocated, so just return */
                return 0;
-       }
+       } else {
+               const struct ieee80211_cipher_scheme *cs;
 
-       if (!local->hw.wiphy->cipher_suites) {
-               /*
-                * Driver specifies cipher schemes only
-                * We start counting ciphers defined by schemes, TKIP and CCMP
+               cs = local->hw.cipher_schemes;
+
+               /* Driver specifies cipher schemes only (but not cipher suites
+                * including the schemes)
+                *
+                * We start counting ciphers defined by schemes, TKIP, CCMP,
+                * CCMP-256, GCMP, and GCMP-256
                 */
-               n_suites = local->hw.n_cipher_schemes + 2;
+               n_suites = local->hw.n_cipher_schemes + 5;
 
                /* check if we have WEP40 and WEP104 */
                if (have_wep)
                        n_suites += 2;
 
-               /* check if we have AES_CMAC */
+               /* check if we have AES_CMAC, BIP-CMAC-256, BIP-GMAC-128,
+                * BIP-GMAC-256
+                */
                if (have_mfp)
-                       n_suites++;
+                       n_suites += 4;
 
                suites = kmalloc(sizeof(u32) * n_suites, GFP_KERNEL);
                if (!suites)
                        return -ENOMEM;
 
                suites[w++] = WLAN_CIPHER_SUITE_CCMP;
+               suites[w++] = WLAN_CIPHER_SUITE_CCMP_256;
                suites[w++] = WLAN_CIPHER_SUITE_TKIP;
+               suites[w++] = WLAN_CIPHER_SUITE_GCMP;
+               suites[w++] = WLAN_CIPHER_SUITE_GCMP_256;
 
                if (have_wep) {
                        suites[w++] = WLAN_CIPHER_SUITE_WEP40;
                        suites[w++] = WLAN_CIPHER_SUITE_WEP104;
                }
 
-               if (have_mfp)
+               if (have_mfp) {
                        suites[w++] = WLAN_CIPHER_SUITE_AES_CMAC;
+                       suites[w++] = WLAN_CIPHER_SUITE_BIP_CMAC_256;
+                       suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_128;
+                       suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_256;
+               }
 
                for (r = 0; r < local->hw.n_cipher_schemes; r++)
                        suites[w++] = cs[r].cipher;
-       } else {
-               /* Driver provides cipher suites, but we need to exclude WEP */
-               suites = kmemdup(local->hw.wiphy->cipher_suites,
-                                sizeof(u32) * local->hw.wiphy->n_cipher_suites,
-                                GFP_KERNEL);
-               if (!suites)
-                       return -ENOMEM;
-
-               for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
-                       u32 suite = local->hw.wiphy->cipher_suites[r];
-
-                       if (suite == WLAN_CIPHER_SUITE_WEP40 ||
-                           suite == WLAN_CIPHER_SUITE_WEP104)
-                               continue;
-                       suites[w++] = suite;
-               }
        }
 
        local->hw.wiphy->cipher_suites = suites;
@@ -1041,10 +1070,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                ieee80211_max_network_latency;
        result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
                                     &local->network_latency_notifier);
-       if (result) {
-               rtnl_lock();
+       if (result)
                goto fail_pm_qos;
-       }
 
 #ifdef CONFIG_INET
        local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
@@ -1072,15 +1099,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
  fail_ifa:
        pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
                               &local->network_latency_notifier);
-       rtnl_lock();
 #endif
  fail_pm_qos:
-       ieee80211_led_exit(local);
+       rtnl_lock();
+       rate_control_deinitialize(local);
        ieee80211_remove_interfaces(local);
  fail_rate:
        rtnl_unlock();
+       ieee80211_led_exit(local);
        ieee80211_wep_free(local);
-       sta_info_stop(local);
        destroy_workqueue(local->workqueue);
  fail_workqueue:
        wiphy_unregister(local->hw.wiphy);
@@ -1176,6 +1203,8 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
 
        kfree(rcu_access_pointer(local->tx_latency));
 
+       sta_info_stop(local);
+
        wiphy_free(local->hw.wiphy);
 }
 EXPORT_SYMBOL(ieee80211_free_hw);
index fa94ca15ba95ba5cefac929a3031541f6a606d1f..b488e1859b18e8ed7797cffbb5ab2319138fdb28 100644 (file)
@@ -523,13 +523,6 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
            sdata->u.mesh.mshcfg.auto_open_plinks &&
            rssi_threshold_check(sdata, sta))
                changed = mesh_plink_open(sta);
-       else if (sta->plink_state == NL80211_PLINK_LISTEN &&
-                (sdata->u.mesh.user_mpm ||
-                 sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED))
-               cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
-                                                  elems->ie_start,
-                                                  elems->total_len,
-                                                  GFP_ATOMIC);
 
        ieee80211_mps_frame_release(sta, elems);
 out:
index 1875181ebd9446923856fc5d5796a28b08a7864d..10ac6324c1d014c708749748ce89ef31055561cf 100644 (file)
@@ -1651,7 +1651,7 @@ __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       bool ret;
+       bool ret = false;
        int ac;
 
        if (local->hw.queues < IEEE80211_NUM_ACS)
@@ -2011,6 +2011,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        /* disable per-vif ps */
        ieee80211_recalc_ps_vif(sdata);
 
+       /* make sure ongoing transmission finishes */
+       synchronize_net();
+
        /*
         * drop any frame before deauth/disassoc, this can be data or
         * management frame. Since we are disconnecting, we should not
index 8c8c67819072b7653f2850fa5980ceff2787b806..ca405b6b686da37a0e966f09e64b972a2d8ec7e2 100644 (file)
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                }
        }
 
-       /* tear down aggregation sessions and remove STAs */
-       mutex_lock(&local->sta_mtx);
-       list_for_each_entry(sta, &local->sta_list, list) {
-               if (sta->uploaded) {
-                       enum ieee80211_sta_state state;
-
-                       state = sta->sta_state;
-                       for (; state > IEEE80211_STA_NOTEXIST; state--)
-                               WARN_ON(drv_sta_state(local, sta->sdata, sta,
-                                                     state, state - 1));
-               }
-       }
-       mutex_unlock(&local->sta_mtx);
-
        /* remove all interfaces that were created in the driver */
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                case NL80211_IFTYPE_STATION:
                        ieee80211_mgd_quiesce(sdata);
                        break;
+               case NL80211_IFTYPE_WDS:
+                       /* tear down aggregation sessions and remove STAs */
+                       mutex_lock(&local->sta_mtx);
+                       sta = sdata->u.wds.sta;
+                       if (sta && sta->uploaded) {
+                               enum ieee80211_sta_state state;
+
+                               state = sta->sta_state;
+                               for (; state > IEEE80211_STA_NOTEXIST; state--)
+                                       WARN_ON(drv_sta_state(local, sta->sdata,
+                                                             sta, state,
+                                                             state - 1));
+                       }
+                       mutex_unlock(&local->sta_mtx);
+                       break;
                default:
                        break;
                }
index 3d79d498e7f6500b2134fc0117dd07d55615c5a8..1101563357eae365f1e1a1df926ecf36fdc0570b 100644 (file)
@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
                channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
        else if (rate)
-               channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+               channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
        else
                channel_flags |= IEEE80211_CHAN_2GHZ;
        put_unaligned_le16(channel_flags, pos);
@@ -647,6 +647,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
 {
        struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
        struct ieee80211_mmie *mmie;
+       struct ieee80211_mmie_16 *mmie16;
 
        if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
                return -1;
@@ -656,11 +657,18 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
 
        mmie = (struct ieee80211_mmie *)
                (skb->data + skb->len - sizeof(*mmie));
-       if (mmie->element_id != WLAN_EID_MMIE ||
-           mmie->length != sizeof(*mmie) - 2)
-               return -1;
-
-       return le16_to_cpu(mmie->key_id);
+       if (mmie->element_id == WLAN_EID_MMIE &&
+           mmie->length == sizeof(*mmie) - 2)
+               return le16_to_cpu(mmie->key_id);
+
+       mmie16 = (struct ieee80211_mmie_16 *)
+               (skb->data + skb->len - sizeof(*mmie16));
+       if (skb->len >= 24 + sizeof(*mmie16) &&
+           mmie16->element_id == WLAN_EID_MMIE &&
+           mmie16->length == sizeof(*mmie16) - 2)
+               return le16_to_cpu(mmie16->key_id);
+
+       return -1;
 }
 
 static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
@@ -1650,11 +1658,27 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
                result = ieee80211_crypto_tkip_decrypt(rx);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
-               result = ieee80211_crypto_ccmp_decrypt(rx);
+               result = ieee80211_crypto_ccmp_decrypt(
+                       rx, IEEE80211_CCMP_MIC_LEN);
+               break;
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               result = ieee80211_crypto_ccmp_decrypt(
+                       rx, IEEE80211_CCMP_256_MIC_LEN);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                result = ieee80211_crypto_aes_cmac_decrypt(rx);
                break;
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               result = ieee80211_crypto_aes_gmac_decrypt(rx);
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               result = ieee80211_crypto_gcmp_decrypt(rx);
+               break;
        default:
                result = ieee80211_crypto_hw_decrypt(rx);
        }
@@ -1781,7 +1805,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                /* This is the first fragment of a new frame. */
                entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
                                                 rx->seqno_idx, &(rx->skb));
-               if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
+               if (rx->key &&
+                   (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
                    ieee80211_has_protected(fc)) {
                        int queue = rx->security_idx;
                        /* Store CCMP PN so that we can verify that the next
@@ -1810,7 +1836,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                int i;
                u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
                int queue;
-               if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
+               if (!rx->key ||
+                   (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
+                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
                        return RX_DROP_UNUSABLE;
                memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
                for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -2310,12 +2338,12 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
                return RX_DROP_MONITOR;
 
        if (rx->sta) {
-               /* The security index has the same property as needed
+               /* The seqno index has the same property as needed
                 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
                 * for non-QoS-data frames. Here we know it's a data
                 * frame, so count MSDUs.
                 */
-               rx->sta->rx_msdu[rx->security_idx]++;
+               rx->sta->rx_msdu[rx->seqno_idx]++;
        }
 
        /*
index 7807fa42ed3f4fcd8fc6b066cbf89cd987e1e331..05f0d711b6d8666701e91262141fb67711d9dad7 100644 (file)
@@ -828,6 +828,11 @@ void ieee80211_scan_work(struct work_struct *work)
 
        mutex_lock(&local->mtx);
 
+       if (!ieee80211_can_run_worker(local)) {
+               aborted = true;
+               goto out_complete;
+       }
+
        sdata = rcu_dereference_protected(local->scan_sdata,
                                          lockdep_is_held(&local->mtx));
        scan_req = rcu_dereference_protected(local->scan_req,
index 79383ef0c26405eeff3dff62e48ffe364ff16646..00ca8dcc2bcf2d924fb24ed0d4ee674295086aff 100644 (file)
@@ -1764,6 +1764,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 
        sinfo->generation = sdata->local->sta_generation;
 
+       /* do before driver, so beacon filtering drivers have a
+        * chance to e.g. just add the number of filtered beacons
+        * (or just modify the value entirely, of course)
+        */
+       if (sdata->vif.type == NL80211_IFTYPE_STATION)
+               sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;
+
        drv_sta_statistics(local, sdata, &sta->sta, sinfo);
 
        sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) |
@@ -1816,6 +1823,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
        sinfo->rx_dropped_misc = sta->rx_dropped;
        sinfo->beacon_loss_count = sta->beacon_loss_count;
 
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX) |
+                                BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+               sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
+       }
+
        if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
            (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
                if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
index 917088dfd69659de6614b327cc0ee23864e429cd..c9f9752217ac8230056e90e28a9b0b02883a87d5 100644 (file)
@@ -345,24 +345,24 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
         */
        sband = local->hw.wiphy->bands[band];
        memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
-       if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
-            action_code == WLAN_TDLS_SETUP_RESPONSE) &&
-           ht_cap.ht_supported && (!sta || sta->sta.ht_cap.ht_supported)) {
-               if (action_code == WLAN_TDLS_SETUP_REQUEST) {
-                       ieee80211_apply_htcap_overrides(sdata, &ht_cap);
-
-                       /* disable SMPS in TDLS initiator */
-                       ht_cap.cap |= (WLAN_HT_CAP_SM_PS_DISABLED
-                                      << IEEE80211_HT_CAP_SM_PS_SHIFT);
-               } else {
-                       /* disable SMPS in TDLS responder */
-                       sta->sta.ht_cap.cap |=
-                               (WLAN_HT_CAP_SM_PS_DISABLED
-                                << IEEE80211_HT_CAP_SM_PS_SHIFT);
-
-                       /* the peer caps are already intersected with our own */
-                       memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
-               }
+
+       if (action_code == WLAN_TDLS_SETUP_REQUEST && ht_cap.ht_supported) {
+               ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+               /* disable SMPS in TDLS initiator */
+               ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
+                               << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+               pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+               ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
+       } else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
+                  ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
+               /* disable SMPS in TDLS responder */
+               sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
+                                       << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+               /* the peer caps are already intersected with our own */
+               memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
 
                pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
                ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
@@ -852,7 +852,6 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
         */
        if ((action_code == WLAN_TDLS_TEARDOWN) &&
            (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
-               struct sta_info *sta = NULL;
                bool try_resend; /* Should we keep skb for possible resend */
 
                /* If not sending directly to peer - no point in keeping skb */
index 02ed6f60629a5aa123513f759104ca57513d0f5f..88a18ffe2975520edbcc80733bc1bbc9b2655f11 100644 (file)
@@ -626,6 +626,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                                tx->key = NULL;
                        break;
                case WLAN_CIPHER_SUITE_CCMP:
+               case WLAN_CIPHER_SUITE_CCMP_256:
+               case WLAN_CIPHER_SUITE_GCMP:
+               case WLAN_CIPHER_SUITE_GCMP_256:
                        if (!ieee80211_is_data_present(hdr->frame_control) &&
                            !ieee80211_use_mfp(hdr->frame_control, tx->sta,
                                               tx->skb))
@@ -636,6 +639,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                                        ieee80211_is_mgmt(hdr->frame_control);
                        break;
                case WLAN_CIPHER_SUITE_AES_CMAC:
+               case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_256:
                        if (!ieee80211_is_mgmt(hdr->frame_control))
                                tx->key = NULL;
                        break;
@@ -1011,9 +1017,21 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
        case WLAN_CIPHER_SUITE_TKIP:
                return ieee80211_crypto_tkip_encrypt(tx);
        case WLAN_CIPHER_SUITE_CCMP:
-               return ieee80211_crypto_ccmp_encrypt(tx);
+               return ieee80211_crypto_ccmp_encrypt(
+                       tx, IEEE80211_CCMP_MIC_LEN);
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               return ieee80211_crypto_ccmp_encrypt(
+                       tx, IEEE80211_CCMP_256_MIC_LEN);
        case WLAN_CIPHER_SUITE_AES_CMAC:
                return ieee80211_crypto_aes_cmac_encrypt(tx);
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               return ieee80211_crypto_aes_cmac_256_encrypt(tx);
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               return ieee80211_crypto_aes_gmac_encrypt(tx);
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               return ieee80211_crypto_gcmp_encrypt(tx);
        default:
                return ieee80211_crypto_hw_encrypt(tx);
        }
index fbd37d43dfceb31ccf57da5231930560eeff6b09..8428f4a954795657a32a24f77a0f9c9ae6591b7e 100644 (file)
@@ -744,16 +744,19 @@ EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif);
 
 /*
  * Nothing should have been stuffed into the workqueue during
- * the suspend->resume cycle. If this WARN is seen then there
- * is a bug with either the driver suspend or something in
- * mac80211 stuffing into the workqueue which we haven't yet
- * cleared during mac80211's suspend cycle.
+ * the suspend->resume cycle. Since we can't check each caller
+ * of this function if we are already quiescing / suspended,
+ * check here and don't WARN since this can actually happen when
+ * the rx path (for example) is racing against __ieee80211_suspend
+ * and suspending / quiescing was set after the rx path checked
+ * them.
  */
 static bool ieee80211_can_queue_work(struct ieee80211_local *local)
 {
-       if (WARN(local->suspended && !local->resuming,
-                "queueing ieee80211 work while going to suspend\n"))
+       if (local->quiescing || (local->suspended && !local->resuming)) {
+               pr_warn("queueing ieee80211 work while going to suspend\n");
                return false;
+       }
 
        return true;
 }
@@ -2057,6 +2060,18 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mb();
        local->resuming = false;
 
+       /* It's possible that we don't handle the scan completion in
+        * time during suspend, so if it's still marked as completed
+        * here, queue the work and flush it to clean things up.
+        * Instead of calling the worker function directly here, we
+        * really queue it to avoid potential races with other flows
+        * scheduling the same work.
+        */
+       if (test_bit(SCAN_COMPLETED, &local->scanning)) {
+               ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+               flush_delayed_work(&local->scan_work);
+       }
+
        if (local->open_count && !reconfig_due_to_wowlan)
                drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
 
index 12398fde02e87e7c7eb0eba1430e72287c6bb6f8..75de6fac40d1533fbb9836ab138d71e838ffb6af 100644 (file)
@@ -22,6 +22,8 @@
 #include "tkip.h"
 #include "aes_ccm.h"
 #include "aes_cmac.h"
+#include "aes_gmac.h"
+#include "aes_gcm.h"
 #include "wpa.h"
 
 ieee80211_tx_result
@@ -393,7 +395,8 @@ static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr)
 }
 
 
-static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
+                           unsigned int mic_len)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_key *key = tx->key;
@@ -424,7 +427,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
        if (info->control.hw_key)
                tail = 0;
        else
-               tail = IEEE80211_CCMP_MIC_LEN;
+               tail = mic_len;
 
        if (WARN_ON(skb_tailroom(skb) < tail ||
                    skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN))
@@ -459,21 +462,22 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
        pos += IEEE80211_CCMP_HDR_LEN;
        ccmp_special_blocks(skb, pn, b_0, aad);
        ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
-                                 skb_put(skb, IEEE80211_CCMP_MIC_LEN));
+                                 skb_put(skb, mic_len), mic_len);
 
        return 0;
 }
 
 
 ieee80211_tx_result
-ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
+ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx,
+                             unsigned int mic_len)
 {
        struct sk_buff *skb;
 
        ieee80211_tx_set_protected(tx);
 
        skb_queue_walk(&tx->skbs, skb) {
-               if (ccmp_encrypt_skb(tx, skb) < 0)
+               if (ccmp_encrypt_skb(tx, skb, mic_len) < 0)
                        return TX_DROP;
        }
 
@@ -482,7 +486,8 @@ ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
 
 
 ieee80211_rx_result
-ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
+ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
+                             unsigned int mic_len)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        int hdrlen;
@@ -499,8 +504,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
            !ieee80211_is_robust_mgmt_frame(skb))
                return RX_CONTINUE;
 
-       data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN -
-                  IEEE80211_CCMP_MIC_LEN;
+       data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
        if (!rx->sta || data_len < 0)
                return RX_DROP_UNUSABLE;
 
@@ -531,14 +535,14 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
                            key->u.ccmp.tfm, b_0, aad,
                            skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
                            data_len,
-                           skb->data + skb->len - IEEE80211_CCMP_MIC_LEN))
+                           skb->data + skb->len - mic_len, mic_len))
                        return RX_DROP_UNUSABLE;
        }
 
        memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
 
        /* Remove CCMP header and MIC */
-       if (pskb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN))
+       if (pskb_trim(skb, skb->len - mic_len))
                return RX_DROP_UNUSABLE;
        memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen);
        skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
@@ -546,6 +550,229 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
        return RX_CONTINUE;
 }
 
+static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
+{
+       __le16 mask_fc;
+       u8 qos_tid;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+       memcpy(j_0, hdr->addr2, ETH_ALEN);
+       memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN);
+       j_0[13] = 0;
+       j_0[14] = 0;
+       j_0[AES_BLOCK_SIZE - 1] = 0x01;
+
+       /* AAD (extra authenticate-only data) / masked 802.11 header
+        * FC | A1 | A2 | A3 | SC | [A4] | [QC]
+        */
+       put_unaligned_be16(ieee80211_hdrlen(hdr->frame_control) - 2, &aad[0]);
+       /* Mask FC: zero subtype b4 b5 b6 (if not mgmt)
+        * Retry, PwrMgt, MoreData; set Protected
+        */
+       mask_fc = hdr->frame_control;
+       mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
+                               IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
+       if (!ieee80211_is_mgmt(hdr->frame_control))
+               mask_fc &= ~cpu_to_le16(0x0070);
+       mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
+       put_unaligned(mask_fc, (__le16 *)&aad[2]);
+       memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);
+
+       /* Mask Seq#, leave Frag# */
+       aad[22] = *((u8 *)&hdr->seq_ctrl) & 0x0f;
+       aad[23] = 0;
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               qos_tid = *ieee80211_get_qos_ctl(hdr) &
+                       IEEE80211_QOS_CTL_TID_MASK;
+       else
+               qos_tid = 0;
+
+       if (ieee80211_has_a4(hdr->frame_control)) {
+               memcpy(&aad[24], hdr->addr4, ETH_ALEN);
+               aad[30] = qos_tid;
+               aad[31] = 0;
+       } else {
+               memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
+               aad[24] = qos_tid;
+       }
+}
+
+static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
+{
+       hdr[0] = pn[5];
+       hdr[1] = pn[4];
+       hdr[2] = 0;
+       hdr[3] = 0x20 | (key_id << 6);
+       hdr[4] = pn[3];
+       hdr[5] = pn[2];
+       hdr[6] = pn[1];
+       hdr[7] = pn[0];
+}
+
+static inline void gcmp_hdr2pn(u8 *pn, const u8 *hdr)
+{
+       pn[0] = hdr[7];
+       pn[1] = hdr[6];
+       pn[2] = hdr[5];
+       pn[3] = hdr[4];
+       pn[4] = hdr[1];
+       pn[5] = hdr[0];
+}
+
+static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_key *key = tx->key;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       int hdrlen, len, tail;
+       u8 *pos;
+       u8 pn[6];
+       u64 pn64;
+       u8 aad[2 * AES_BLOCK_SIZE];
+       u8 j_0[AES_BLOCK_SIZE];
+
+       if (info->control.hw_key &&
+           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
+           !((info->control.hw_key->flags &
+              IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
+             ieee80211_is_mgmt(hdr->frame_control))) {
+               /* hwaccel has no need for preallocated room for GCMP
+                * header or MIC fields
+                */
+               return 0;
+       }
+
+       hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       len = skb->len - hdrlen;
+
+       if (info->control.hw_key)
+               tail = 0;
+       else
+               tail = IEEE80211_GCMP_MIC_LEN;
+
+       if (WARN_ON(skb_tailroom(skb) < tail ||
+                   skb_headroom(skb) < IEEE80211_GCMP_HDR_LEN))
+               return -1;
+
+       pos = skb_push(skb, IEEE80211_GCMP_HDR_LEN);
+       memmove(pos, pos + IEEE80211_GCMP_HDR_LEN, hdrlen);
+       skb_set_network_header(skb, skb_network_offset(skb) +
+                                   IEEE80211_GCMP_HDR_LEN);
+
+       /* the HW only needs room for the IV, but not the actual IV */
+       if (info->control.hw_key &&
+           (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+               return 0;
+
+       hdr = (struct ieee80211_hdr *)pos;
+       pos += hdrlen;
+
+       pn64 = atomic64_inc_return(&key->u.gcmp.tx_pn);
+
+       pn[5] = pn64;
+       pn[4] = pn64 >> 8;
+       pn[3] = pn64 >> 16;
+       pn[2] = pn64 >> 24;
+       pn[1] = pn64 >> 32;
+       pn[0] = pn64 >> 40;
+
+       gcmp_pn2hdr(pos, pn, key->conf.keyidx);
+
+       /* hwaccel - with software GCMP header */
+       if (info->control.hw_key)
+               return 0;
+
+       pos += IEEE80211_GCMP_HDR_LEN;
+       gcmp_special_blocks(skb, pn, j_0, aad);
+       ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
+                                 skb_put(skb, IEEE80211_GCMP_MIC_LEN));
+
+       return 0;
+}
+
+ieee80211_tx_result
+ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx)
+{
+       struct sk_buff *skb;
+
+       ieee80211_tx_set_protected(tx);
+
+       skb_queue_walk(&tx->skbs, skb) {
+               if (gcmp_encrypt_skb(tx, skb) < 0)
+                       return TX_DROP;
+       }
+
+       return TX_CONTINUE;
+}
+
+ieee80211_rx_result
+ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+       int hdrlen;
+       struct ieee80211_key *key = rx->key;
+       struct sk_buff *skb = rx->skb;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       u8 pn[IEEE80211_GCMP_PN_LEN];
+       int data_len;
+       int queue;
+
+       hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+       if (!ieee80211_is_data(hdr->frame_control) &&
+           !ieee80211_is_robust_mgmt_frame(skb))
+               return RX_CONTINUE;
+
+       data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN -
+                  IEEE80211_GCMP_MIC_LEN;
+       if (!rx->sta || data_len < 0)
+               return RX_DROP_UNUSABLE;
+
+       if (status->flag & RX_FLAG_DECRYPTED) {
+               if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
+                       return RX_DROP_UNUSABLE;
+       } else {
+               if (skb_linearize(rx->skb))
+                       return RX_DROP_UNUSABLE;
+       }
+
+       gcmp_hdr2pn(pn, skb->data + hdrlen);
+
+       queue = rx->security_idx;
+
+       if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) {
+               key->u.gcmp.replays++;
+               return RX_DROP_UNUSABLE;
+       }
+
+       if (!(status->flag & RX_FLAG_DECRYPTED)) {
+               u8 aad[2 * AES_BLOCK_SIZE];
+               u8 j_0[AES_BLOCK_SIZE];
+               /* hardware didn't decrypt/verify MIC */
+               gcmp_special_blocks(skb, pn, j_0, aad);
+
+               if (ieee80211_aes_gcm_decrypt(
+                           key->u.gcmp.tfm, j_0, aad,
+                           skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
+                           data_len,
+                           skb->data + skb->len - IEEE80211_GCMP_MIC_LEN))
+                       return RX_DROP_UNUSABLE;
+       }
+
+       memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+
+       /* Remove GCMP header and MIC */
+       if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
+               return RX_DROP_UNUSABLE;
+       memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
+       skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
+
+       return RX_CONTINUE;
+}
+
 static ieee80211_tx_result
 ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx,
                            struct sk_buff *skb)
@@ -729,6 +956,48 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
        return TX_CONTINUE;
 }
 
+ieee80211_tx_result
+ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
+{
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
+       struct ieee80211_key *key = tx->key;
+       struct ieee80211_mmie_16 *mmie;
+       u8 aad[20];
+       u64 pn64;
+
+       if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+               return TX_DROP;
+
+       skb = skb_peek(&tx->skbs);
+
+       info = IEEE80211_SKB_CB(skb);
+
+       if (info->control.hw_key)
+               return TX_CONTINUE;
+
+       if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
+               return TX_DROP;
+
+       mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie));
+       mmie->element_id = WLAN_EID_MMIE;
+       mmie->length = sizeof(*mmie) - 2;
+       mmie->key_id = cpu_to_le16(key->conf.keyidx);
+
+       /* PN = PN + 1 */
+       pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn);
+
+       bip_ipn_set64(mmie->sequence_number, pn64);
+
+       bip_aad(skb, aad);
+
+       /* MIC = AES-256-CMAC(IGTK, AAD || Management Frame Body || MMIE, 128)
+        */
+       ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
+                              skb->data + 24, skb->len - 24, mmie->mic);
+
+       return TX_CONTINUE;
+}
 
 ieee80211_rx_result
 ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
@@ -780,6 +1049,160 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
        return RX_CONTINUE;
 }
 
+ieee80211_rx_result
+ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
+{
+       struct sk_buff *skb = rx->skb;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_key *key = rx->key;
+       struct ieee80211_mmie_16 *mmie;
+       u8 aad[20], mic[16], ipn[6];
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+       if (!ieee80211_is_mgmt(hdr->frame_control))
+               return RX_CONTINUE;
+
+       /* management frames are already linear */
+
+       if (skb->len < 24 + sizeof(*mmie))
+               return RX_DROP_UNUSABLE;
+
+       mmie = (struct ieee80211_mmie_16 *)
+               (skb->data + skb->len - sizeof(*mmie));
+       if (mmie->element_id != WLAN_EID_MMIE ||
+           mmie->length != sizeof(*mmie) - 2)
+               return RX_DROP_UNUSABLE; /* Invalid MMIE */
+
+       bip_ipn_swap(ipn, mmie->sequence_number);
+
+       if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) {
+               key->u.aes_cmac.replays++;
+               return RX_DROP_UNUSABLE;
+       }
+
+       if (!(status->flag & RX_FLAG_DECRYPTED)) {
+               /* hardware didn't decrypt/verify MIC */
+               bip_aad(skb, aad);
+               ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
+                                      skb->data + 24, skb->len - 24, mic);
+               if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+                       key->u.aes_cmac.icverrors++;
+                       return RX_DROP_UNUSABLE;
+               }
+       }
+
+       memcpy(key->u.aes_cmac.rx_pn, ipn, 6);
+
+       /* Remove MMIE */
+       skb_trim(skb, skb->len - sizeof(*mmie));
+
+       return RX_CONTINUE;
+}
+
+ieee80211_tx_result
+ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
+{
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
+       struct ieee80211_key *key = tx->key;
+       struct ieee80211_mmie_16 *mmie;
+       struct ieee80211_hdr *hdr;
+       u8 aad[20];
+       u64 pn64;
+       u8 nonce[12];
+
+       if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+               return TX_DROP;
+
+       skb = skb_peek(&tx->skbs);
+
+       info = IEEE80211_SKB_CB(skb);
+
+       if (info->control.hw_key)
+               return TX_CONTINUE;
+
+       if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
+               return TX_DROP;
+
+       mmie = (struct ieee80211_mmie_16 *)skb_put(skb, sizeof(*mmie));
+       mmie->element_id = WLAN_EID_MMIE;
+       mmie->length = sizeof(*mmie) - 2;
+       mmie->key_id = cpu_to_le16(key->conf.keyidx);
+
+       /* PN = PN + 1 */
+       pn64 = atomic64_inc_return(&key->u.aes_gmac.tx_pn);
+
+       bip_ipn_set64(mmie->sequence_number, pn64);
+
+       bip_aad(skb, aad);
+
+       hdr = (struct ieee80211_hdr *)skb->data;
+       memcpy(nonce, hdr->addr2, ETH_ALEN);
+       bip_ipn_swap(nonce + ETH_ALEN, mmie->sequence_number);
+
+       /* MIC = AES-GMAC(IGTK, AAD || Management Frame Body || MMIE, 128) */
+       if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
+                              skb->data + 24, skb->len - 24, mmie->mic) < 0)
+               return TX_DROP;
+
+       return TX_CONTINUE;
+}
+
+ieee80211_rx_result
+ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
+{
+       struct sk_buff *skb = rx->skb;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_key *key = rx->key;
+       struct ieee80211_mmie_16 *mmie;
+       u8 aad[20], mic[16], ipn[6], nonce[12];
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+       if (!ieee80211_is_mgmt(hdr->frame_control))
+               return RX_CONTINUE;
+
+       /* management frames are already linear */
+
+       if (skb->len < 24 + sizeof(*mmie))
+               return RX_DROP_UNUSABLE;
+
+       mmie = (struct ieee80211_mmie_16 *)
+               (skb->data + skb->len - sizeof(*mmie));
+       if (mmie->element_id != WLAN_EID_MMIE ||
+           mmie->length != sizeof(*mmie) - 2)
+               return RX_DROP_UNUSABLE; /* Invalid MMIE */
+
+       bip_ipn_swap(ipn, mmie->sequence_number);
+
+       if (memcmp(ipn, key->u.aes_gmac.rx_pn, 6) <= 0) {
+               key->u.aes_gmac.replays++;
+               return RX_DROP_UNUSABLE;
+       }
+
+       if (!(status->flag & RX_FLAG_DECRYPTED)) {
+               /* hardware didn't decrypt/verify MIC */
+               bip_aad(skb, aad);
+
+               memcpy(nonce, hdr->addr2, ETH_ALEN);
+               memcpy(nonce + ETH_ALEN, ipn, 6);
+
+               if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
+                                      skb->data + 24, skb->len - 24,
+                                      mic) < 0 ||
+                   memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+                       key->u.aes_gmac.icverrors++;
+                       return RX_DROP_UNUSABLE;
+               }
+       }
+
+       memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
+
+       /* Remove MMIE */
+       skb_trim(skb, skb->len - sizeof(*mmie));
+
+       return RX_CONTINUE;
+}
+
 ieee80211_tx_result
 ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx)
 {
index 62e5a12dfe0a24010b32eec210d7a38a1c4c3eb8..d98011ee8f554a512acd0d9711a4ee9bcb7583f0 100644 (file)
@@ -24,17 +24,32 @@ ieee80211_rx_result
 ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx);
 
 ieee80211_tx_result
-ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx,
+                             unsigned int mic_len);
 ieee80211_rx_result
-ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
+                             unsigned int mic_len);
 
 ieee80211_tx_result
 ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_tx_result
+ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx);
 ieee80211_rx_result
 ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_rx_result
+ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_tx_result
+ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_rx_result
+ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx);
 ieee80211_tx_result
 ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx);
 ieee80211_rx_result
 ieee80211_crypto_hw_decrypt(struct ieee80211_rx_data *rx);
 
+ieee80211_tx_result
+ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx);
+ieee80211_rx_result
+ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx);
+
 #endif /* WPA_H */
index 7d31da503dcfcd24d00826d24b121228075ced35..5d9f68c75e5f8f68c5884d467e5a9d16db0edaac 100644 (file)
@@ -51,10 +51,7 @@ ieee802154_add_iface(struct wpan_phy *phy, const char *name,
        struct net_device *err;
 
        err = ieee802154_if_add(local, name, type, extended_addr);
-       if (IS_ERR(err))
-               return PTR_ERR(err);
-
-       return 0;
+       return PTR_ERR_OR_ZERO(err);
 }
 
 static int
index 349295d21946d0072fb6a0a6bada3bd45d54f95f..809df534a7204916f10203ce55dcc3fdc0fe7b51 100644 (file)
@@ -60,14 +60,14 @@ out:
        return segs;
 }
 
-static struct packet_offload mpls_mc_offload = {
+static struct packet_offload mpls_mc_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_MPLS_MC),
        .callbacks = {
                .gso_segment    =       mpls_gso_segment,
        },
 };
 
-static struct packet_offload mpls_uc_offload = {
+static struct packet_offload mpls_uc_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_MPLS_UC),
        .callbacks = {
                .gso_segment    =       mpls_gso_segment,
index 990decba1fe418e36e59a1f081fcf0e47188da29..b87ca32efa0b4e6edc7f251c2c32c4ba3b55659c 100644 (file)
@@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
        return err;
 }
 
-static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
+static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
+                                unsigned int hooknum)
 {
+       if (!sysctl_snat_reroute(skb))
+               return 0;
+       /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
+       if (NF_INET_LOCAL_IN == hooknum)
+               return 0;
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
-               if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
+               struct dst_entry *dst = skb_dst(skb);
+
+               if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
+                   ip6_route_me_harder(skb) != 0)
                        return 1;
        } else
 #endif
-               if ((sysctl_snat_reroute(skb) ||
-                    skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+               if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
                    ip_route_me_harder(skb, RTN_LOCAL) != 0)
                        return 1;
 
@@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
                                union nf_inet_addr *snet,
                                __u8 protocol, struct ip_vs_conn *cp,
                                struct ip_vs_protocol *pp,
-                               unsigned int offset, unsigned int ihl)
+                               unsigned int offset, unsigned int ihl,
+                               unsigned int hooknum)
 {
        unsigned int verdict = NF_DROP;
 
@@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 #endif
                ip_vs_nat_icmp(skb, pp, cp, 1);
 
-       if (ip_vs_route_me_harder(af, skb))
+       if (ip_vs_route_me_harder(af, skb, hooknum))
                goto out;
 
        /* do the statistics and put it back */
@@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
 
        snet.ip = iph->saddr;
        return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
-                                   pp, ciph.len, ihl);
+                                   pp, ciph.len, ihl, hooknum);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
        snet.in6 = ciph.saddr.in6;
        writable = ciph.len;
        return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
-                                   pp, writable, sizeof(struct ipv6hdr));
+                                   pp, writable, sizeof(struct ipv6hdr),
+                                   hooknum);
 }
 #endif
 
@@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
  */
 static unsigned int
 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-               struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
+               struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
+               unsigned int hooknum)
 {
        struct ip_vs_protocol *pp = pd->pp;
 
@@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
         * if it came from this machine itself.  So re-compute
         * the routing information.
         */
-       if (ip_vs_route_me_harder(af, skb))
+       if (ip_vs_route_me_harder(af, skb, hooknum))
                goto drop;
 
        IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
@@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
        cp = pp->conn_out_get(af, skb, &iph, 0);
 
        if (likely(cp))
-               return handle_response(af, skb, pd, cp, &iph);
+               return handle_response(af, skb, pd, cp, &iph, hooknum);
        if (sysctl_nat_icmp_send(net) &&
            (pp->protocol == IPPROTO_TCP ||
             pp->protocol == IPPROTO_UDP ||
index b8295a430a5600d35b6de4163ba3b98e75c5f28c..e55759056361c47ed1fcfa5c656541ba39bfd260 100644 (file)
@@ -2887,7 +2887,8 @@ static int ip_vs_genl_dump_service(struct sk_buff *skb,
        if (ip_vs_genl_fill_service(skb, svc) < 0)
                goto nla_put_failure;
 
-       return genlmsg_end(skb, hdr);
+       genlmsg_end(skb, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(skb, hdr);
@@ -3079,7 +3080,8 @@ static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
        if (ip_vs_genl_fill_dest(skb, dest) < 0)
                goto nla_put_failure;
 
-       return genlmsg_end(skb, hdr);
+       genlmsg_end(skb, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(skb, hdr);
@@ -3215,7 +3217,8 @@ static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state,
        if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
                goto nla_put_failure;
 
-       return genlmsg_end(skb, hdr);
+       genlmsg_end(skb, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(skb, hdr);
index 1d5341f3761dfe1e57cc6505493bf270cca672de..5d3daae98bf0be1bc0fa699ec96208d2fa3da1d4 100644 (file)
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct nf_conn *ct;
        struct net *net;
 
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct ip_vs_conn *n_cp;
        struct net *net;
 
+       /* no diff required for incoming packets */
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       /* no diff required for incoming packets */
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
index a11674806707e18fb9d86860ad0717dfda0b0da2..13fad8668f83d7ea4c7862b4c24d1234475ac37d 100644 (file)
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
-       /* We have to check the DYING flag inside the lock to prevent
-          a race against nf_ct_get_next_corpse() possibly called from
-          user context, else we insert an already 'dead' hash, blocking
-          further use of that particular connection -JM */
+       /* We have to check the DYING flag after unlink to prevent
+        * a race against nf_ct_get_next_corpse() possibly called from
+        * user context, else we insert an already 'dead' hash, blocking
+        * further use of that particular connection -JM.
+        */
+       nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
-       if (unlikely(nf_ct_is_dying(ct))) {
-               nf_conntrack_double_unlock(hash, reply_hash);
-               local_bh_enable();
-               return NF_ACCEPT;
-       }
+       if (unlikely(nf_ct_is_dying(ct)))
+               goto out;
 
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
 
-       nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        return NF_ACCEPT;
 
 out:
+       nf_ct_add_to_dying_list(ct);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
@@ -1426,12 +1424,6 @@ void nf_ct_free_hashtable(void *hash, unsigned int size)
 }
 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
 
-void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
-{
-       nf_ct_iterate_cleanup(net, kill_all, NULL, portid, report);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
-
 static int untrack_refs(void)
 {
        int cnt = 0, cpu;
@@ -1624,13 +1616,18 @@ int nf_conntrack_init_start(void)
        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_init(&nf_conntrack_locks[i]);
 
-       /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
-        * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
        if (!nf_conntrack_htable_size) {
+               /* Idea from tcp.c: use 1/16384 of memory.
+                * On i386: 32MB machine has 512 buckets.
+                * >= 1GB machines have 16384 buckets.
+                * >= 4GB machines have 65536 buckets.
+                */
                nf_conntrack_htable_size
                        = (((totalram_pages << PAGE_SHIFT) / 16384)
                           / sizeof(struct hlist_head));
-               if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
+               if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
+                       nf_conntrack_htable_size = 65536;
+               else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 16384;
                if (nf_conntrack_htable_size < 32)
                        nf_conntrack_htable_size = 32;
index 1bd9ed9e62f642477e8a10d57cd73775cc1ebc14..d1c23940a86ad96cddbf1aefe0747c43fddca920 100644 (file)
@@ -749,13 +749,47 @@ static int ctnetlink_done(struct netlink_callback *cb)
        return 0;
 }
 
-struct ctnetlink_dump_filter {
+struct ctnetlink_filter {
        struct {
                u_int32_t val;
                u_int32_t mask;
        } mark;
 };
 
+static struct ctnetlink_filter *
+ctnetlink_alloc_filter(const struct nlattr * const cda[])
+{
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       struct ctnetlink_filter *filter;
+
+       filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+       if (filter == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
+       filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+
+       return filter;
+#else
+       return ERR_PTR(-EOPNOTSUPP);
+#endif
+}
+
+static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
+{
+       struct ctnetlink_filter *filter = data;
+
+       if (filter == NULL)
+               return 1;
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       if ((ct->mark & filter->mark.mask) == filter->mark.val)
+               return 1;
+#endif
+
+       return 0;
+}
+
 static int
 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 {
@@ -768,10 +802,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        int res;
        spinlock_t *lockp;
 
-#ifdef CONFIG_NF_CONNTRACK_MARK
-       const struct ctnetlink_dump_filter *filter = cb->data;
-#endif
-
        last = (struct nf_conn *)cb->args[1];
 
        local_bh_disable();
@@ -798,12 +828,9 @@ restart:
                                        continue;
                                cb->args[1] = 0;
                        }
-#ifdef CONFIG_NF_CONNTRACK_MARK
-                       if (filter && !((ct->mark & filter->mark.mask) ==
-                                       filter->mark.val)) {
+                       if (!ctnetlink_filter_match(ct, cb->data))
                                continue;
-                       }
-#endif
+
                        rcu_read_lock();
                        res =
                        ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
@@ -1001,6 +1028,25 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
                                    .len = NF_CT_LABELS_MAX_SIZE },
 };
 
+static int ctnetlink_flush_conntrack(struct net *net,
+                                    const struct nlattr * const cda[],
+                                    u32 portid, int report)
+{
+       struct ctnetlink_filter *filter = NULL;
+
+       if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+               filter = ctnetlink_alloc_filter(cda);
+               if (IS_ERR(filter))
+                       return PTR_ERR(filter);
+       }
+
+       nf_ct_iterate_cleanup(net, ctnetlink_filter_match, filter,
+                             portid, report);
+       kfree(filter);
+
+       return 0;
+}
+
 static int
 ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
                        const struct nlmsghdr *nlh,
@@ -1024,11 +1070,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        else if (cda[CTA_TUPLE_REPLY])
                err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
        else {
-               /* Flush the whole table */
-               nf_conntrack_flush_report(net,
-                                        NETLINK_CB(skb).portid,
-                                        nlmsg_report(nlh));
-               return 0;
+               return ctnetlink_flush_conntrack(net, cda,
+                                                NETLINK_CB(skb).portid,
+                                                nlmsg_report(nlh));
        }
 
        if (err < 0)
@@ -1076,21 +1120,16 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
                        .dump = ctnetlink_dump_table,
                        .done = ctnetlink_done,
                };
-#ifdef CONFIG_NF_CONNTRACK_MARK
+
                if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
-                       struct ctnetlink_dump_filter *filter;
+                       struct ctnetlink_filter *filter;
 
-                       filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
-                                        GFP_ATOMIC);
-                       if (filter == NULL)
-                               return -ENOMEM;
+                       filter = ctnetlink_alloc_filter(cda);
+                       if (IS_ERR(filter))
+                               return PTR_ERR(filter);
 
-                       filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
-                       filter->mark.mask =
-                               ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
                        c.data = filter;
                }
-#endif
                return netlink_dump_start(ctnl, skb, nlh, &c);
        }
 
index f6e2ae91a80badd697a1f77e299dc332bc34a8d1..ce3e840c870452b705744f9b64fb32661b6c82a5 100644 (file)
@@ -98,9 +98,9 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
                        new_end_seq = htonl(ntohl(sack->end_seq) -
                                      seq->offset_before);
 
-               pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
-                        ntohl(sack->start_seq), new_start_seq,
-                        ntohl(sack->end_seq), new_end_seq);
+               pr_debug("sack_adjust: start_seq: %u->%u, end_seq: %u->%u\n",
+                        ntohl(sack->start_seq), ntohl(new_start_seq),
+                        ntohl(sack->end_seq), ntohl(new_end_seq));
 
                inet_proto_csum_replace4(&tcph->check, skb,
                                         sack->start_seq, new_start_seq, 0);
index 43c926cae9c00de1306144abcddcf64f510c2864..0d8448f19dfe982f64879d9dc55ba7d746696196 100644 (file)
@@ -425,8 +425,7 @@ static int netfilter_log_sysctl_init(struct net *net)
                        nf_log_sysctl_table[i].procname =
                                nf_log_sysctl_fnames[i];
                        nf_log_sysctl_table[i].data = NULL;
-                       nf_log_sysctl_table[i].maxlen =
-                               NFLOGGER_NAME_LEN * sizeof(char);
+                       nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN;
                        nf_log_sysctl_table[i].mode = 0644;
                        nf_log_sysctl_table[i].proc_handler =
                                nf_log_proc_dostring;
index 129a8daa4abf31959801e99c4f2fbfc7f1aab230..199fd0f27b0e128cfb8674ca331c2dae240e1b1c 100644 (file)
@@ -427,7 +427,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
            nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_trim(skb, nlh);
@@ -713,16 +714,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
        struct nft_chain *chain, *nc;
        struct nft_set *set, *ns;
 
-       list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+       list_for_each_entry(chain, &ctx->table->chains, list) {
                ctx->chain = chain;
 
                err = nft_delrule_by_chain(ctx);
                if (err < 0)
                        goto out;
-
-               err = nft_delchain(ctx);
-               if (err < 0)
-                       goto out;
        }
 
        list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@@ -735,6 +732,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
                        goto out;
        }
 
+       list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+               ctx->chain = chain;
+
+               err = nft_delchain(ctx);
+               if (err < 0)
+                       goto out;
+       }
+
        err = nft_deltable(ctx);
 out:
        return err;
@@ -967,7 +972,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
        if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_trim(skb, nlh);
@@ -1130,9 +1136,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
        /* Restore old counters on this cpu, no problem. Per-cpu statistics
         * are not exposed to userspace.
         */
+       preempt_disable();
        stats = this_cpu_ptr(newstats);
        stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
        stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+       preempt_enable();
 
        return newstats;
 }
@@ -1258,8 +1266,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
                trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
                                        sizeof(struct nft_trans_chain));
-               if (trans == NULL)
+               if (trans == NULL) {
+                       free_percpu(stats);
                        return -ENOMEM;
+               }
 
                nft_trans_chain_stats(trans) = stats;
                nft_trans_chain_update(trans) = true;
@@ -1315,8 +1325,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                hookfn = type->hooks[hooknum];
 
                basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
-               if (basechain == NULL)
+               if (basechain == NULL) {
+                       module_put(type->owner);
                        return -ENOMEM;
+               }
 
                if (nla[NFTA_CHAIN_COUNTERS]) {
                        stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
@@ -1703,7 +1715,8 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
            nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule)))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_trim(skb, nlh);
@@ -2357,7 +2370,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
                goto nla_put_failure;
        nla_nest_end(skb, desc);
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_trim(skb, nlh);
@@ -3031,7 +3045,8 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
 
        nla_nest_end(skb, nest);
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_trim(skb, nlh);
@@ -3320,7 +3335,8 @@ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
        if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq)))
                goto nla_put_failure;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_trim(skb, nlh);
@@ -3749,6 +3765,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
 }
 EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
 
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+                            unsigned int hook_flags)
+{
+       struct nft_base_chain *basechain;
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               basechain = nft_base_chain(chain);
+
+               if ((1 << basechain->ops[0].hooknum) & hook_flags)
+                       return 0;
+
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
+
 /*
  * Loop detection - walk through the ruleset beginning at the destination chain
  * of a new jump until either the source chain is reached (loop) or all
index cde4a6702fa3199421d6ced324e2bb74fdaa46ab..8b117c90ecd765ac1b7242c58f36ef7531a49b6e 100644 (file)
@@ -272,7 +272,7 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
                                u_int16_t subsys_id)
 {
-       struct sk_buff *nskb, *oskb = skb;
+       struct sk_buff *oskb = skb;
        struct net *net = sock_net(skb->sk);
        const struct nfnetlink_subsystem *ss;
        const struct nfnl_callback *nc;
@@ -283,12 +283,11 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (subsys_id >= NFNL_SUBSYS_COUNT)
                return netlink_ack(skb, nlh, -EINVAL);
 replay:
-       nskb = netlink_skb_clone(oskb, GFP_KERNEL);
-       if (!nskb)
+       skb = netlink_skb_clone(oskb, GFP_KERNEL);
+       if (!skb)
                return netlink_ack(oskb, nlh, -ENOMEM);
 
-       nskb->sk = oskb->sk;
-       skb = nskb;
+       skb->sk = oskb->sk;
 
        nfnl_lock(subsys_id);
        ss = rcu_dereference_protected(table[subsys_id].subsys,
@@ -305,7 +304,7 @@ replay:
                {
                        nfnl_unlock(subsys_id);
                        netlink_ack(skb, nlh, -EOPNOTSUPP);
-                       return kfree_skb(nskb);
+                       return kfree_skb(skb);
                }
        }
 
@@ -321,7 +320,8 @@ replay:
                nlh = nlmsg_hdr(skb);
                err = 0;
 
-               if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+               if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
+                   skb->len < nlh->nlmsg_len) {
                        err = -EINVAL;
                        goto ack;
                }
@@ -385,7 +385,7 @@ replay:
                                nfnl_err_reset(&err_list);
                                ss->abort(oskb);
                                nfnl_unlock(subsys_id);
-                               kfree_skb(nskb);
+                               kfree_skb(skb);
                                goto replay;
                        }
                }
@@ -426,7 +426,7 @@ done:
 
        nfnl_err_deliver(&err_list, oskb);
        nfnl_unlock(subsys_id);
-       kfree_skb(nskb);
+       kfree_skb(skb);
 }
 
 static void nfnetlink_rcv(struct sk_buff *skb)
@@ -469,7 +469,7 @@ static int nfnetlink_bind(struct net *net, int group)
        int type;
 
        if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
-               return -EINVAL;
+               return 0;
 
        type = nfnl_group2type[group];
 
index 9e287cb56a04bf35ec9a7d5123f58396a9440fb1..a5599fc51a6f3f87db4d63ecd20073481520a238 100644 (file)
@@ -86,7 +86,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
 static int
 nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
 {
-       const struct nf_conn_help *help = nfct_help(ct);
+       struct nf_conn_help *help = nfct_help(ct);
 
        if (attr == NULL)
                return -EINVAL;
@@ -94,7 +94,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
        if (help->helper->data_len == 0)
                return -EINVAL;
 
-       memcpy(&help->data, nla_data(attr), help->helper->data_len);
+       memcpy(help->data, nla_data(attr), help->helper->data_len);
        return 0;
 }
 
index 1e316ce4cb5dedc6d4a97c6589cc4b6d19866dcd..61e6c407476a618df386c2f14839033398aae14b 100644 (file)
@@ -33,7 +33,7 @@ static bool nft_hash_lookup(const struct nft_set *set,
                            const struct nft_data *key,
                            struct nft_data *data)
 {
-       const struct rhashtable *priv = nft_set_priv(set);
+       struct rhashtable *priv = nft_set_priv(set);
        const struct nft_hash_elem *he;
 
        he = rhashtable_lookup(priv, key);
@@ -83,69 +83,97 @@ static void nft_hash_remove(const struct nft_set *set,
                            const struct nft_set_elem *elem)
 {
        struct rhashtable *priv = nft_set_priv(set);
-       struct rhash_head *he, __rcu **pprev;
 
-       pprev = elem->cookie;
-       he = rht_dereference((*pprev), priv);
+       rhashtable_remove(priv, elem->cookie);
+       synchronize_rcu();
+       kfree(elem->cookie);
+}
 
-       rhashtable_remove_pprev(priv, he, pprev);
+struct nft_compare_arg {
+       const struct nft_set *set;
+       struct nft_set_elem *elem;
+};
 
-       synchronize_rcu();
-       kfree(he);
+static bool nft_hash_compare(void *ptr, void *arg)
+{
+       struct nft_hash_elem *he = ptr;
+       struct nft_compare_arg *x = arg;
+
+       if (!nft_data_cmp(&he->key, &x->elem->key, x->set->klen)) {
+               x->elem->cookie = he;
+               x->elem->flags = 0;
+               if (x->set->flags & NFT_SET_MAP)
+                       nft_data_copy(&x->elem->data, he->data);
+
+               return true;
+       }
+
+       return false;
 }
 
 static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
 {
-       const struct rhashtable *priv = nft_set_priv(set);
-       const struct bucket_table *tbl = rht_dereference_rcu(priv->tbl, priv);
-       struct rhash_head __rcu * const *pprev;
-       struct nft_hash_elem *he;
-       u32 h;
-
-       h = rhashtable_hashfn(priv, &elem->key, set->klen);
-       pprev = &tbl->buckets[h];
-       rht_for_each_entry_rcu(he, tbl->buckets[h], node) {
-               if (nft_data_cmp(&he->key, &elem->key, set->klen)) {
-                       pprev = &he->node.next;
-                       continue;
-               }
+       struct rhashtable *priv = nft_set_priv(set);
+       struct nft_compare_arg arg = {
+               .set = set,
+               .elem = elem,
+       };
 
-               elem->cookie = (void *)pprev;
-               elem->flags = 0;
-               if (set->flags & NFT_SET_MAP)
-                       nft_data_copy(&elem->data, he->data);
+       if (rhashtable_lookup_compare(priv, &elem->key,
+                                     &nft_hash_compare, &arg))
                return 0;
-       }
+
        return -ENOENT;
 }
 
 static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                          struct nft_set_iter *iter)
 {
-       const struct rhashtable *priv = nft_set_priv(set);
-       const struct bucket_table *tbl;
+       struct rhashtable *priv = nft_set_priv(set);
        const struct nft_hash_elem *he;
+       struct rhashtable_iter hti;
        struct nft_set_elem elem;
-       unsigned int i;
+       int err;
 
-       tbl = rht_dereference_rcu(priv->tbl, priv);
-       for (i = 0; i < tbl->size; i++) {
-               rht_for_each_entry_rcu(he, tbl->buckets[i], node) {
-                       if (iter->count < iter->skip)
-                               goto cont;
-
-                       memcpy(&elem.key, &he->key, sizeof(elem.key));
-                       if (set->flags & NFT_SET_MAP)
-                               memcpy(&elem.data, he->data, sizeof(elem.data));
-                       elem.flags = 0;
-
-                       iter->err = iter->fn(ctx, set, iter, &elem);
-                       if (iter->err < 0)
-                               return;
-cont:
-                       iter->count++;
+       err = rhashtable_walk_init(priv, &hti);
+       iter->err = err;
+       if (err)
+               return;
+
+       err = rhashtable_walk_start(&hti);
+       if (err && err != -EAGAIN) {
+               iter->err = err;
+               goto out;
+       }
+
+       while ((he = rhashtable_walk_next(&hti))) {
+               if (IS_ERR(he)) {
+                       err = PTR_ERR(he);
+                       if (err != -EAGAIN) {
+                               iter->err = err;
+                               goto out;
+                       }
                }
+
+               if (iter->count < iter->skip)
+                       goto cont;
+
+               memcpy(&elem.key, &he->key, sizeof(elem.key));
+               if (set->flags & NFT_SET_MAP)
+                       memcpy(&elem.data, he->data, sizeof(elem.data));
+               elem.flags = 0;
+
+               iter->err = iter->fn(ctx, set, iter, &elem);
+               if (iter->err < 0)
+                       goto out;
+
+cont:
+               iter->count++;
        }
+
+out:
+       rhashtable_walk_stop(&hti);
+       rhashtable_walk_exit(&hti);
 }
 
 static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
@@ -153,13 +181,6 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
        return sizeof(struct rhashtable);
 }
 
-#ifdef CONFIG_PROVE_LOCKING
-static int lockdep_nfnl_lock_is_held(void *parent)
-{
-       return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES);
-}
-#endif
-
 static int nft_hash_init(const struct nft_set *set,
                         const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
@@ -173,9 +194,6 @@ static int nft_hash_init(const struct nft_set *set,
                .hashfn = jhash,
                .grow_decision = rht_grow_above_75,
                .shrink_decision = rht_shrink_below_30,
-#ifdef CONFIG_PROVE_LOCKING
-               .mutex_is_held = lockdep_nfnl_lock_is_held,
-#endif
        };
 
        return rhashtable_init(priv, &params);
@@ -183,18 +201,23 @@ static int nft_hash_init(const struct nft_set *set,
 
 static void nft_hash_destroy(const struct nft_set *set)
 {
-       const struct rhashtable *priv = nft_set_priv(set);
-       const struct bucket_table *tbl = priv->tbl;
-       struct nft_hash_elem *he, *next;
+       struct rhashtable *priv = nft_set_priv(set);
+       const struct bucket_table *tbl;
+       struct nft_hash_elem *he;
+       struct rhash_head *pos, *next;
        unsigned int i;
 
+       /* Stop an eventual async resizing */
+       priv->being_destroyed = true;
+       mutex_lock(&priv->mutex);
+
+       tbl = rht_dereference(priv->tbl, priv);
        for (i = 0; i < tbl->size; i++) {
-               for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node);
-                    he != NULL; he = next) {
-                       next = rht_entry(he->node.next, struct nft_hash_elem, node);
+               rht_for_each_entry_safe(he, pos, next, tbl, i, node)
                        nft_hash_elem_destroy(set, he);
-               }
        }
+       mutex_unlock(&priv->mutex);
+
        rhashtable_destroy(priv);
 }
 
index d1ffd5eb3a9b5b86495b53adb5b8bc27c17921df..9aea747b43eab7b91b3b458cd0a5b89f2d9e6927 100644 (file)
@@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_masq_policy);
 
+int nft_masq_validate(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nft_data **data)
+{
+       int err;
+
+       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       if (err < 0)
+               return err;
+
+       return nft_chain_validate_hooks(ctx->chain,
+                                       (1 << NF_INET_POST_ROUTING));
+}
+EXPORT_SYMBOL_GPL(nft_masq_validate);
+
 int nft_masq_init(const struct nft_ctx *ctx,
                  const struct nft_expr *expr,
                  const struct nlattr * const tb[])
@@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx,
        struct nft_masq *priv = nft_expr_priv(expr);
        int err;
 
-       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-       if (err < 0)
+       err = nft_masq_validate(ctx, expr, NULL);
+       if (err)
                return err;
 
        if (tb[NFTA_MASQ_FLAGS] == NULL)
@@ -60,12 +75,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_masq_dump);
 
-int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                     const struct nft_data **data)
-{
-       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_masq_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
index afe2b0b45ec41f82df6f2430958a4392aa5eb608..a0837c6c9283dc90b043750ffe1d7217c95ab239 100644 (file)
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
        }
 
        if (priv->sreg_proto_min) {
-               range.min_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               range.max_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               range.min_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               range.max_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
@@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
        [NFTA_NAT_FLAGS]         = { .type = NLA_U32 },
 };
 
-static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                       const struct nlattr * const tb[])
+static int nft_nat_validate(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nft_data **data)
 {
        struct nft_nat *priv = nft_expr_priv(expr);
-       u32 family;
        int err;
 
        err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
        if (err < 0)
                return err;
 
+       switch (priv->type) {
+       case NFT_NAT_SNAT:
+               err = nft_chain_validate_hooks(ctx->chain,
+                                              (1 << NF_INET_POST_ROUTING) |
+                                              (1 << NF_INET_LOCAL_IN));
+               break;
+       case NFT_NAT_DNAT:
+               err = nft_chain_validate_hooks(ctx->chain,
+                                              (1 << NF_INET_PRE_ROUTING) |
+                                              (1 << NF_INET_LOCAL_OUT));
+               break;
+       }
+
+       return err;
+}
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                       const struct nlattr * const tb[])
+{
+       struct nft_nat *priv = nft_expr_priv(expr);
+       u32 family;
+       int err;
+
        if (tb[NFTA_NAT_TYPE] == NULL ||
            (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
             tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
@@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                return -EINVAL;
        }
 
+       err = nft_nat_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
+
        if (tb[NFTA_NAT_FAMILY] == NULL)
                return -EINVAL;
 
@@ -219,13 +246,6 @@ nla_put_failure:
        return -1;
 }
 
-static int nft_nat_validate(const struct nft_ctx *ctx,
-                           const struct nft_expr *expr,
-                           const struct nft_data **data)
-{
-       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-
 static struct nft_expr_type nft_nat_type;
 static const struct nft_expr_ops nft_nat_ops = {
        .type           = &nft_nat_type,
index 9e8093f283113117ac7618c742624742e618ed3a..d7e9e93a4e90f498f7a002c33840c928a3ab17e2 100644 (file)
@@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_redir_policy);
 
+int nft_redir_validate(const struct nft_ctx *ctx,
+                      const struct nft_expr *expr,
+                      const struct nft_data **data)
+{
+       int err;
+
+       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       if (err < 0)
+               return err;
+
+       return nft_chain_validate_hooks(ctx->chain,
+                                       (1 << NF_INET_PRE_ROUTING) |
+                                       (1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_redir_validate);
+
 int nft_redir_init(const struct nft_ctx *ctx,
                   const struct nft_expr *expr,
                   const struct nlattr * const tb[])
@@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
        struct nft_redir *priv = nft_expr_priv(expr);
        int err;
 
-       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       err = nft_redir_validate(ctx, expr, NULL);
        if (err < 0)
                return err;
 
@@ -88,12 +104,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_redir_dump);
 
-int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                      const struct nft_data **data)
-{
-       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_redir_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
index c529161cdbf8906eeacacca22358ee26d25d3f9f..0778855ea5e75dff20f77f58a23c1f751c5ae45d 100644 (file)
@@ -225,6 +225,8 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
 
        rcu_read_lock();
        list_for_each_entry_rcu(kf, &xt_osf_fingers[df], finger_entry) {
+               int foptsize, optnum;
+
                f = &kf->finger;
 
                if (!(info->flags & XT_OSF_LOG) && strcmp(info->genre, f->genre))
@@ -233,110 +235,109 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
                optp = _optp;
                fmatch = FMATCH_WRONG;
 
-               if (totlen == f->ss && xt_osf_ttl(skb, info, f->ttl)) {
-                       int foptsize, optnum;
+               if (totlen != f->ss || !xt_osf_ttl(skb, info, f->ttl))
+                       continue;
 
-                       /*
-                        * Should not happen if userspace parser was written correctly.
-                        */
-                       if (f->wss.wc >= OSF_WSS_MAX)
-                               continue;
+               /*
+                * Should not happen if userspace parser was written correctly.
+                */
+               if (f->wss.wc >= OSF_WSS_MAX)
+                       continue;
 
-                       /* Check options */
+               /* Check options */
 
-                       foptsize = 0;
-                       for (optnum = 0; optnum < f->opt_num; ++optnum)
-                               foptsize += f->opt[optnum].length;
+               foptsize = 0;
+               for (optnum = 0; optnum < f->opt_num; ++optnum)
+                       foptsize += f->opt[optnum].length;
 
-                       if (foptsize > MAX_IPOPTLEN ||
-                               optsize > MAX_IPOPTLEN ||
-                               optsize != foptsize)
-                               continue;
+               if (foptsize > MAX_IPOPTLEN ||
+                   optsize > MAX_IPOPTLEN ||
+                   optsize != foptsize)
+                       continue;
 
-                       check_WSS = f->wss.wc;
+               check_WSS = f->wss.wc;
 
-                       for (optnum = 0; optnum < f->opt_num; ++optnum) {
-                               if (f->opt[optnum].kind == (*optp)) {
-                                       __u32 len = f->opt[optnum].length;
-                                       const __u8 *optend = optp + len;
-                                       int loop_cont = 0;
+               for (optnum = 0; optnum < f->opt_num; ++optnum) {
+                       if (f->opt[optnum].kind == (*optp)) {
+                               __u32 len = f->opt[optnum].length;
+                               const __u8 *optend = optp + len;
+                               int loop_cont = 0;
 
-                                       fmatch = FMATCH_OK;
+                               fmatch = FMATCH_OK;
 
-                                       switch (*optp) {
-                                       case OSFOPT_MSS:
-                                               mss = optp[3];
-                                               mss <<= 8;
-                                               mss |= optp[2];
+                               switch (*optp) {
+                               case OSFOPT_MSS:
+                                       mss = optp[3];
+                                       mss <<= 8;
+                                       mss |= optp[2];
 
-                                               mss = ntohs((__force __be16)mss);
-                                               break;
-                                       case OSFOPT_TS:
-                                               loop_cont = 1;
-                                               break;
-                                       }
+                                       mss = ntohs((__force __be16)mss);
+                                       break;
+                               case OSFOPT_TS:
+                                       loop_cont = 1;
+                                       break;
+                               }
 
-                                       optp = optend;
-                               } else
-                                       fmatch = FMATCH_OPT_WRONG;
+                               optp = optend;
+                       } else
+                               fmatch = FMATCH_OPT_WRONG;
 
-                               if (fmatch != FMATCH_OK)
-                                       break;
-                       }
+                       if (fmatch != FMATCH_OK)
+                               break;
+               }
 
-                       if (fmatch != FMATCH_OPT_WRONG) {
-                               fmatch = FMATCH_WRONG;
+               if (fmatch != FMATCH_OPT_WRONG) {
+                       fmatch = FMATCH_WRONG;
 
-                               switch (check_WSS) {
-                               case OSF_WSS_PLAIN:
-                                       if (f->wss.val == 0 || window == f->wss.val)
-                                               fmatch = FMATCH_OK;
-                                       break;
-                               case OSF_WSS_MSS:
-                                       /*
-                                        * Some smart modems decrease mangle MSS to 
-                                        * SMART_MSS_2, so we check standard, decreased
-                                        * and the one provided in the fingerprint MSS
-                                        * values.
-                                        */
+                       switch (check_WSS) {
+                       case OSF_WSS_PLAIN:
+                               if (f->wss.val == 0 || window == f->wss.val)
+                                       fmatch = FMATCH_OK;
+                               break;
+                       case OSF_WSS_MSS:
+                               /*
+                                * Some smart modems decrease mangle MSS to
+                                * SMART_MSS_2, so we check standard, decreased
+                                * and the one provided in the fingerprint MSS
+                                * values.
+                                */
 #define SMART_MSS_1    1460
 #define SMART_MSS_2    1448
-                                       if (window == f->wss.val * mss ||
-                                           window == f->wss.val * SMART_MSS_1 ||
-                                           window == f->wss.val * SMART_MSS_2)
-                                               fmatch = FMATCH_OK;
-                                       break;
-                               case OSF_WSS_MTU:
-                                       if (window == f->wss.val * (mss + 40) ||
-                                           window == f->wss.val * (SMART_MSS_1 + 40) ||
-                                           window == f->wss.val * (SMART_MSS_2 + 40))
-                                               fmatch = FMATCH_OK;
-                                       break;
-                               case OSF_WSS_MODULO:
-                                       if ((window % f->wss.val) == 0)
-                                               fmatch = FMATCH_OK;
-                                       break;
-                               }
+                               if (window == f->wss.val * mss ||
+                                   window == f->wss.val * SMART_MSS_1 ||
+                                   window == f->wss.val * SMART_MSS_2)
+                                       fmatch = FMATCH_OK;
+                               break;
+                       case OSF_WSS_MTU:
+                               if (window == f->wss.val * (mss + 40) ||
+                                   window == f->wss.val * (SMART_MSS_1 + 40) ||
+                                   window == f->wss.val * (SMART_MSS_2 + 40))
+                                       fmatch = FMATCH_OK;
+                               break;
+                       case OSF_WSS_MODULO:
+                               if ((window % f->wss.val) == 0)
+                                       fmatch = FMATCH_OK;
+                               break;
                        }
+               }
 
-                       if (fmatch != FMATCH_OK)
-                               continue;
+               if (fmatch != FMATCH_OK)
+                       continue;
 
-                       fcount++;
+               fcount++;
 
-                       if (info->flags & XT_OSF_LOG)
-                               nf_log_packet(net, p->family, p->hooknum, skb,
-                                       p->in, p->out, NULL,
-                                       "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
-                                       f->genre, f->version, f->subtype,
-                                       &ip->saddr, ntohs(tcp->source),
-                                       &ip->daddr, ntohs(tcp->dest),
-                                       f->ttl - ip->ttl);
+               if (info->flags & XT_OSF_LOG)
+                       nf_log_packet(net, p->family, p->hooknum, skb,
+                                     p->in, p->out, NULL,
+                                     "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
+                                     f->genre, f->version, f->subtype,
+                                     &ip->saddr, ntohs(tcp->source),
+                                     &ip->daddr, ntohs(tcp->dest),
+                                     f->ttl - ip->ttl);
 
-                       if ((info->flags & XT_OSF_LOG) &&
-                           info->loglevel == XT_OSF_LOGLEVEL_FIRST)
-                               break;
-               }
+               if ((info->flags & XT_OSF_LOG) &&
+                   info->loglevel == XT_OSF_LOGLEVEL_FIRST)
+                       break;
        }
        rcu_read_unlock();
 
index c2f2a53a487919bcb45e882b9ff8859c699b9d56..7fd1104ba9007ec0d731d97d0a245eb3da7b4436 100644 (file)
@@ -324,8 +324,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
        return 0;
 
 add_std_failure:
-       if (doi_def)
-               cipso_v4_doi_free(doi_def);
+       cipso_v4_doi_free(doi_def);
        return ret_val;
 }
 
@@ -641,7 +640,8 @@ static int netlbl_cipsov4_listall_cb(struct cipso_v4_doi *doi_def, void *arg)
        if (ret_val != 0)
                goto listall_cb_failure;
 
-       return genlmsg_end(cb_arg->skb, data);
+       genlmsg_end(cb_arg->skb, data);
+       return 0;
 
 listall_cb_failure:
        genlmsg_cancel(cb_arg->skb, data);
index e66e977ef2fa0cf3ec52bc2ea3d6604d17de3e47..70440748fe5c439979e26dec5728f832d44859d6 100644 (file)
@@ -93,23 +93,20 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                                  struct netlbl_audit *audit_info)
 {
        int ret_val = -EINVAL;
-       struct netlbl_dom_map *entry = NULL;
        struct netlbl_domaddr_map *addrmap = NULL;
        struct cipso_v4_doi *cipsov4 = NULL;
        u32 tmp_val;
+       struct netlbl_dom_map *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (entry == NULL) {
-               ret_val = -ENOMEM;
-               goto add_failure;
-       }
+       if (!entry)
+               return -ENOMEM;
        entry->def.type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);
        if (info->attrs[NLBL_MGMT_A_DOMAIN]) {
                size_t tmp_size = nla_len(info->attrs[NLBL_MGMT_A_DOMAIN]);
                entry->domain = kmalloc(tmp_size, GFP_KERNEL);
                if (entry->domain == NULL) {
                        ret_val = -ENOMEM;
-                       goto add_failure;
+                       goto add_free_entry;
                }
                nla_strlcpy(entry->domain,
                            info->attrs[NLBL_MGMT_A_DOMAIN], tmp_size);
@@ -125,16 +122,16 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                break;
        case NETLBL_NLTYPE_CIPSOV4:
                if (!info->attrs[NLBL_MGMT_A_CV4DOI])
-                       goto add_failure;
+                       goto add_free_domain;
 
                tmp_val = nla_get_u32(info->attrs[NLBL_MGMT_A_CV4DOI]);
                cipsov4 = cipso_v4_doi_getdef(tmp_val);
                if (cipsov4 == NULL)
-                       goto add_failure;
+                       goto add_free_domain;
                entry->def.cipso = cipsov4;
                break;
        default:
-               goto add_failure;
+               goto add_free_domain;
        }
 
        if (info->attrs[NLBL_MGMT_A_IPV4ADDR]) {
@@ -145,7 +142,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                addrmap = kzalloc(sizeof(*addrmap), GFP_KERNEL);
                if (addrmap == NULL) {
                        ret_val = -ENOMEM;
-                       goto add_failure;
+                       goto add_doi_put_def;
                }
                INIT_LIST_HEAD(&addrmap->list4);
                INIT_LIST_HEAD(&addrmap->list6);
@@ -153,12 +150,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                if (nla_len(info->attrs[NLBL_MGMT_A_IPV4ADDR]) !=
                    sizeof(struct in_addr)) {
                        ret_val = -EINVAL;
-                       goto add_failure;
+                       goto add_free_addrmap;
                }
                if (nla_len(info->attrs[NLBL_MGMT_A_IPV4MASK]) !=
                    sizeof(struct in_addr)) {
                        ret_val = -EINVAL;
-                       goto add_failure;
+                       goto add_free_addrmap;
                }
                addr = nla_data(info->attrs[NLBL_MGMT_A_IPV4ADDR]);
                mask = nla_data(info->attrs[NLBL_MGMT_A_IPV4MASK]);
@@ -166,7 +163,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                map = kzalloc(sizeof(*map), GFP_KERNEL);
                if (map == NULL) {
                        ret_val = -ENOMEM;
-                       goto add_failure;
+                       goto add_free_addrmap;
                }
                map->list.addr = addr->s_addr & mask->s_addr;
                map->list.mask = mask->s_addr;
@@ -178,7 +175,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
                if (ret_val != 0) {
                        kfree(map);
-                       goto add_failure;
+                       goto add_free_addrmap;
                }
 
                entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
@@ -192,7 +189,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                addrmap = kzalloc(sizeof(*addrmap), GFP_KERNEL);
                if (addrmap == NULL) {
                        ret_val = -ENOMEM;
-                       goto add_failure;
+                       goto add_doi_put_def;
                }
                INIT_LIST_HEAD(&addrmap->list4);
                INIT_LIST_HEAD(&addrmap->list6);
@@ -200,12 +197,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                if (nla_len(info->attrs[NLBL_MGMT_A_IPV6ADDR]) !=
                    sizeof(struct in6_addr)) {
                        ret_val = -EINVAL;
-                       goto add_failure;
+                       goto add_free_addrmap;
                }
                if (nla_len(info->attrs[NLBL_MGMT_A_IPV6MASK]) !=
                    sizeof(struct in6_addr)) {
                        ret_val = -EINVAL;
-                       goto add_failure;
+                       goto add_free_addrmap;
                }
                addr = nla_data(info->attrs[NLBL_MGMT_A_IPV6ADDR]);
                mask = nla_data(info->attrs[NLBL_MGMT_A_IPV6MASK]);
@@ -213,7 +210,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                map = kzalloc(sizeof(*map), GFP_KERNEL);
                if (map == NULL) {
                        ret_val = -ENOMEM;
-                       goto add_failure;
+                       goto add_free_addrmap;
                }
                map->list.addr = *addr;
                map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
@@ -227,7 +224,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
                if (ret_val != 0) {
                        kfree(map);
-                       goto add_failure;
+                       goto add_free_addrmap;
                }
 
                entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
@@ -237,16 +234,17 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
 
        ret_val = netlbl_domhsh_add(entry, audit_info);
        if (ret_val != 0)
-               goto add_failure;
+               goto add_free_addrmap;
 
        return 0;
 
-add_failure:
-       if (cipsov4)
-               cipso_v4_doi_putdef(cipsov4);
-       if (entry)
-               kfree(entry->domain);
+add_free_addrmap:
        kfree(addrmap);
+add_doi_put_def:
+       cipso_v4_doi_putdef(cipsov4);
+add_free_domain:
+       kfree(entry->domain);
+add_free_entry:
        kfree(entry);
        return ret_val;
 }
@@ -456,7 +454,8 @@ static int netlbl_mgmt_listall_cb(struct netlbl_dom_map *entry, void *arg)
                goto listall_cb_failure;
 
        cb_arg->seq++;
-       return genlmsg_end(cb_arg->skb, data);
+       genlmsg_end(cb_arg->skb, data);
+       return 0;
 
 listall_cb_failure:
        genlmsg_cancel(cb_arg->skb, data);
@@ -620,7 +619,8 @@ static int netlbl_mgmt_protocols_cb(struct sk_buff *skb,
        if (ret_val != 0)
                goto protocols_cb_failure;
 
-       return genlmsg_end(skb, data);
+       genlmsg_end(skb, data);
+       return 0;
 
 protocols_cb_failure:
        genlmsg_cancel(skb, data);
index 78a63c18779e443be49b5a9509499ab7cb43869a..aec7994f78cf8df09fab84817c9bbee4ec77daaa 100644 (file)
@@ -1163,7 +1163,8 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
                goto list_cb_failure;
 
        cb_arg->seq++;
-       return genlmsg_end(cb_arg->skb, data);
+       genlmsg_end(cb_arg->skb, data);
+       return 0;
 
 list_cb_failure:
        genlmsg_cancel(cb_arg->skb, data);
index 84ea76ca3f1fc52da96c31d3bf27fda678a1dd19..2702673f0f237d7fa43e4649ccc0672f67c895bc 100644 (file)
@@ -61,6 +61,7 @@
 #include <linux/rhashtable.h>
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
+#include <linux/genetlink.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -97,12 +98,12 @@ static int netlink_dump(struct sock *sk);
 static void netlink_skb_destructor(struct sk_buff *skb);
 
 /* nl_table locking explained:
- * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
- * combined with an RCU read-side lock. Insertion and removal are protected
- * with nl_sk_hash_lock while using RCU list modification primitives and may
- * run in parallel to nl_table_lock protected lookups. Destruction of the
- * Netlink socket may only occur *after* nl_table_lock has been acquired
- * either during or after the socket has been removed from the list.
+ * Lookup and traversal are protected with an RCU read-side lock. Insertion
+ * and removal are protected with per bucket lock while using RCU list
+ * modification primitives and may run in parallel to RCU protected lookups.
+ * Destruction of the Netlink socket may only occur *after* nl_table_lock has
+ * been acquired * either during or after the socket has been removed from
+ * the list and after an RCU grace period.
  */
 DEFINE_RWLOCK(nl_table_lock);
 EXPORT_SYMBOL_GPL(nl_table_lock);
@@ -110,19 +111,6 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
 
-/* Protects netlink socket hash table mutations */
-DEFINE_MUTEX(nl_sk_hash_lock);
-EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
-
-#ifdef CONFIG_PROVE_LOCKING
-static int lockdep_nl_sk_hash_is_held(void *parent)
-{
-       if (debug_locks)
-               return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
-       return 1;
-}
-#endif
-
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
 static DEFINE_SPINLOCK(netlink_tap_lock);
@@ -707,7 +695,7 @@ static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
 
 static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
                                u32 dst_portid, u32 dst_group,
-                               struct sock_iocb *siocb)
+                               struct scm_cookie *scm)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
@@ -753,7 +741,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 
                NETLINK_CB(skb).portid    = nlk->portid;
                NETLINK_CB(skb).dst_group = dst_group;
-               NETLINK_CB(skb).creds     = siocb->scm->creds;
+               NETLINK_CB(skb).creds     = scm->creds;
 
                err = security_netlink_send(sk, skb);
                if (err) {
@@ -832,7 +820,7 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
 #define netlink_tx_is_mmaped(sk)       false
 #define netlink_mmap                   sock_no_mmap
 #define netlink_poll                   datagram_poll
-#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)    0
+#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)      0
 #endif /* CONFIG_NETLINK_MMAP */
 
 static void netlink_skb_destructor(struct sk_buff *skb)
@@ -1002,26 +990,33 @@ static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
                .net = net,
                .portid = portid,
        };
-       u32 hash;
-
-       hash = rhashtable_hashfn(&table->hash, &portid, sizeof(portid));
 
-       return rhashtable_lookup_compare(&table->hash, hash,
+       return rhashtable_lookup_compare(&table->hash, &portid,
                                         &netlink_compare, &arg);
 }
 
+static bool __netlink_insert(struct netlink_table *table, struct sock *sk)
+{
+       struct netlink_compare_arg arg = {
+               .net = sock_net(sk),
+               .portid = nlk_sk(sk)->portid,
+       };
+
+       return rhashtable_lookup_compare_insert(&table->hash,
+                                               &nlk_sk(sk)->node,
+                                               &netlink_compare, &arg);
+}
+
 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 {
        struct netlink_table *table = &nl_table[protocol];
        struct sock *sk;
 
-       read_lock(&nl_table_lock);
        rcu_read_lock();
        sk = __netlink_lookup(table, portid, net);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();
-       read_unlock(&nl_table_lock);
 
        return sk;
 }
@@ -1052,29 +1047,33 @@ netlink_update_listeners(struct sock *sk)
         * makes sure updates are visible before bind or setsockopt return. */
 }
 
-static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
+static int netlink_insert(struct sock *sk, u32 portid)
 {
        struct netlink_table *table = &nl_table[sk->sk_protocol];
-       int err = -EADDRINUSE;
+       int err;
 
-       mutex_lock(&nl_sk_hash_lock);
-       if (__netlink_lookup(table, portid, net))
-               goto err;
+       lock_sock(sk);
 
        err = -EBUSY;
        if (nlk_sk(sk)->portid)
                goto err;
 
        err = -ENOMEM;
-       if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX))
+       if (BITS_PER_LONG > 32 &&
+           unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
                goto err;
 
        nlk_sk(sk)->portid = portid;
        sock_hold(sk);
-       rhashtable_insert(&table->hash, &nlk_sk(sk)->node);
+
        err = 0;
+       if (!__netlink_insert(table, sk)) {
+               err = -EADDRINUSE;
+               sock_put(sk);
+       }
+
 err:
-       mutex_unlock(&nl_sk_hash_lock);
+       release_sock(sk);
        return err;
 }
 
@@ -1082,19 +1081,19 @@ static void netlink_remove(struct sock *sk)
 {
        struct netlink_table *table;
 
-       mutex_lock(&nl_sk_hash_lock);
        table = &nl_table[sk->sk_protocol];
        if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
-       mutex_unlock(&nl_sk_hash_lock);
 
        netlink_table_grab();
        if (nlk_sk(sk)->subscriptions) {
                __sk_del_bind_node(sk);
                netlink_update_listeners(sk);
        }
+       if (sk->sk_protocol == NETLINK_GENERIC)
+               atomic_inc(&genl_sk_destructing_cnt);
        netlink_table_ungrab();
 }
 
@@ -1194,6 +1193,13 @@ out_module:
        goto out;
 }
 
+static void deferred_put_nlk_sk(struct rcu_head *head)
+{
+       struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
+
+       sock_put(&nlk->sk);
+}
+
 static int netlink_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -1211,6 +1217,20 @@ static int netlink_release(struct socket *sock)
         * will be purged.
         */
 
+       /* must not acquire netlink_table_lock in any way again before unbind
+        * and notifying genetlink is done as otherwise it might deadlock
+        */
+       if (nlk->netlink_unbind) {
+               int i;
+
+               for (i = 0; i < nlk->ngroups; i++)
+                       if (test_bit(i, nlk->groups))
+                               nlk->netlink_unbind(sock_net(sk), i + 1);
+       }
+       if (sk->sk_protocol == NETLINK_GENERIC &&
+           atomic_dec_return(&genl_sk_destructing_cnt) == 0)
+               wake_up(&genl_sk_destructing_waitq);
+
        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);
 
@@ -1246,20 +1266,13 @@ static int netlink_release(struct socket *sock)
                netlink_table_ungrab();
        }
 
-       if (nlk->netlink_unbind) {
-               int i;
-
-               for (i = 0; i < nlk->ngroups; i++)
-                       if (test_bit(i, nlk->groups))
-                               nlk->netlink_unbind(sock_net(sk), i + 1);
-       }
        kfree(nlk->groups);
        nlk->groups = NULL;
 
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
        local_bh_enable();
-       sock_put(sk);
+       call_rcu(&nlk->rcu, deferred_put_nlk_sk);
        return 0;
 }
 
@@ -1274,7 +1287,6 @@ static int netlink_autobind(struct socket *sock)
 
 retry:
        cond_resched();
-       netlink_table_grab();
        rcu_read_lock();
        if (__netlink_lookup(table, portid, net)) {
                /* Bind collision, search negative portid values. */
@@ -1282,13 +1294,11 @@ retry:
                if (rover > -4097)
                        rover = -4097;
                rcu_read_unlock();
-               netlink_table_ungrab();
                goto retry;
        }
        rcu_read_unlock();
-       netlink_table_ungrab();
 
-       err = netlink_insert(sk, net, portid);
+       err = netlink_insert(sk, portid);
        if (err == -EADDRINUSE)
                goto retry;
 
@@ -1428,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups,
 
        for (undo = 0; undo < group; undo++)
                if (test_bit(undo, &groups))
-                       nlk->netlink_unbind(sock_net(sk), undo);
+                       nlk->netlink_unbind(sock_net(sk), undo + 1);
 }
 
 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1466,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                for (group = 0; group < nlk->ngroups; group++) {
                        if (!test_bit(group, &groups))
                                continue;
-                       err = nlk->netlink_bind(net, group);
+                       err = nlk->netlink_bind(net, group + 1);
                        if (!err)
                                continue;
                        netlink_undo_bind(group, groups, sk);
@@ -1476,7 +1486,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        if (!nlk->portid) {
                err = nladdr->nl_pid ?
-                       netlink_insert(sk, net, nladdr->nl_pid) :
+                       netlink_insert(sk, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err) {
                        netlink_undo_bind(nlk->ngroups, groups, sk);
@@ -2249,7 +2259,6 @@ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
 static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len)
 {
-       struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
@@ -2263,10 +2272,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (msg->msg_flags&MSG_OOB)
                return -EOPNOTSUPP;
 
-       if (NULL == siocb->scm)
-               siocb->scm = &scm;
-
-       err = scm_send(sock, msg, siocb->scm, true);
+       err = scm_send(sock, msg, &scm, true);
        if (err < 0)
                return err;
 
@@ -2292,10 +2298,15 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                        goto out;
        }
 
+       /* It's a really convoluted way for userland to ask for mmaped
+        * sendmsg(), but that's what we've got...
+        */
        if (netlink_tx_is_mmaped(sk) &&
+           msg->msg_iter.type == ITER_IOVEC &&
+           msg->msg_iter.nr_segs == 1 &&
            msg->msg_iter.iov->iov_base == NULL) {
                err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
-                                          siocb);
+                                          &scm);
                goto out;
        }
 
@@ -2309,7 +2320,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
        NETLINK_CB(skb).portid  = nlk->portid;
        NETLINK_CB(skb).dst_group = dst_group;
-       NETLINK_CB(skb).creds   = siocb->scm->creds;
+       NETLINK_CB(skb).creds   = scm.creds;
        NETLINK_CB(skb).flags   = netlink_skb_flags;
 
        err = -EFAULT;
@@ -2331,7 +2342,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
 
 out:
-       scm_destroy(siocb->scm);
+       scm_destroy(&scm);
        return err;
 }
 
@@ -2339,7 +2350,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len,
                           int flags)
 {
-       struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -2402,11 +2412,8 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        if (nlk->flags & NETLINK_RECV_PKTINFO)
                netlink_cmsg_recv_pktinfo(msg, skb);
 
-       if (NULL == siocb->scm) {
-               memset(&scm, 0, sizeof(scm));
-               siocb->scm = &scm;
-       }
-       siocb->scm->creds = *NETLINK_CREDS(skb);
+       memset(&scm, 0, sizeof(scm));
+       scm.creds = *NETLINK_CREDS(skb);
        if (flags & MSG_TRUNC)
                copied = data_skb->len;
 
@@ -2421,7 +2428,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                }
        }
 
-       scm_recv(sock, msg, siocb->scm, flags);
+       scm_recv(sock, msg, &scm, flags);
 out:
        netlink_rcv_wake(sk);
        return err ? : copied;
@@ -2482,7 +2489,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
        if (cfg && cfg->input)
                nlk_sk(sk)->netlink_rcv = cfg->input;
 
-       if (netlink_insert(sk, net, 0))
+       if (netlink_insert(sk, 0))
                goto out_sock_release;
 
        nlk = nlk_sk(sk);
@@ -2884,97 +2891,97 @@ EXPORT_SYMBOL(nlmsg_notify);
 #ifdef CONFIG_PROC_FS
 struct nl_seq_iter {
        struct seq_net_private p;
+       struct rhashtable_iter hti;
        int link;
-       int hash_idx;
 };
 
-static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
+static int netlink_walk_start(struct nl_seq_iter *iter)
 {
-       struct nl_seq_iter *iter = seq->private;
-       int i, j;
-       struct netlink_sock *nlk;
-       struct sock *s;
-       loff_t off = 0;
-
-       for (i = 0; i < MAX_LINKS; i++) {
-               struct rhashtable *ht = &nl_table[i].hash;
-               const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-
-               for (j = 0; j < tbl->size; j++) {
-                       rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
-                               s = (struct sock *)nlk;
+       int err;
 
-                               if (sock_net(s) != seq_file_net(seq))
-                                       continue;
-                               if (off == pos) {
-                                       iter->link = i;
-                                       iter->hash_idx = j;
-                                       return s;
-                               }
-                               ++off;
-                       }
-               }
+       err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
+       if (err) {
+               iter->link = MAX_LINKS;
+               return err;
        }
-       return NULL;
+
+       err = rhashtable_walk_start(&iter->hti);
+       return err == -EAGAIN ? 0 : err;
 }
 
-static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(nl_table_lock) __acquires(RCU)
+static void netlink_walk_stop(struct nl_seq_iter *iter)
 {
-       read_lock(&nl_table_lock);
-       rcu_read_lock();
-       return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+       rhashtable_walk_stop(&iter->hti);
+       rhashtable_walk_exit(&iter->hti);
 }
 
-static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+static void *__netlink_seq_next(struct seq_file *seq)
 {
-       struct rhashtable *ht;
+       struct nl_seq_iter *iter = seq->private;
        struct netlink_sock *nlk;
-       struct nl_seq_iter *iter;
-       struct net *net;
-       int i, j;
 
-       ++*pos;
+       do {
+               for (;;) {
+                       int err;
 
-       if (v == SEQ_START_TOKEN)
-               return netlink_seq_socket_idx(seq, 0);
+                       nlk = rhashtable_walk_next(&iter->hti);
 
-       net = seq_file_net(seq);
-       iter = seq->private;
-       nlk = v;
+                       if (IS_ERR(nlk)) {
+                               if (PTR_ERR(nlk) == -EAGAIN)
+                                       continue;
 
-       i = iter->link;
-       ht = &nl_table[i].hash;
-       rht_for_each_entry(nlk, nlk->node.next, ht, node)
-               if (net_eq(sock_net((struct sock *)nlk), net))
-                       return nlk;
+                               return nlk;
+                       }
 
-       j = iter->hash_idx + 1;
+                       if (nlk)
+                               break;
 
-       do {
-               const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-
-               for (; j < tbl->size; j++) {
-                       rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
-                               if (net_eq(sock_net((struct sock *)nlk), net)) {
-                                       iter->link = i;
-                                       iter->hash_idx = j;
-                                       return nlk;
-                               }
-                       }
+                       netlink_walk_stop(iter);
+                       if (++iter->link >= MAX_LINKS)
+                               return NULL;
+
+                       err = netlink_walk_start(iter);
+                       if (err)
+                               return ERR_PTR(err);
                }
+       } while (sock_net(&nlk->sk) != seq_file_net(seq));
 
-               j = 0;
-       } while (++i < MAX_LINKS);
+       return nlk;
+}
 
-       return NULL;
+static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
+{
+       struct nl_seq_iter *iter = seq->private;
+       void *obj = SEQ_START_TOKEN;
+       loff_t pos;
+       int err;
+
+       iter->link = 0;
+
+       err = netlink_walk_start(iter);
+       if (err)
+               return ERR_PTR(err);
+
+       for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
+               obj = __netlink_seq_next(seq);
+
+       return obj;
+}
+
+static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       ++*pos;
+       return __netlink_seq_next(seq);
 }
 
 static void netlink_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU) __releases(nl_table_lock)
 {
-       rcu_read_unlock();
-       read_unlock(&nl_table_lock);
+       struct nl_seq_iter *iter = seq->private;
+
+       if (iter->link >= MAX_LINKS)
+               return;
+
+       netlink_walk_stop(iter);
 }
 
 
@@ -3121,9 +3128,6 @@ static int __init netlink_proto_init(void)
                .max_shift = 16, /* 64K */
                .grow_decision = rht_grow_above_75,
                .shrink_decision = rht_shrink_below_30,
-#ifdef CONFIG_PROVE_LOCKING
-               .mutex_is_held = lockdep_nl_sk_hash_is_held,
-#endif
        };
 
        if (err != 0)
index f123a88496f8f5282287ba5028ae03110d25bb7c..89008405d6b4d2c9f2d82850fa872be98553e154 100644 (file)
@@ -2,6 +2,7 @@
 #define _AF_NETLINK_H
 
 #include <linux/rhashtable.h>
+#include <linux/atomic.h>
 #include <net/sock.h>
 
 #define NLGRPSZ(x)     (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -50,6 +51,7 @@ struct netlink_sock {
 #endif /* CONFIG_NETLINK_MMAP */
 
        struct rhash_head       node;
+       struct rcu_head         rcu;
 };
 
 static inline struct netlink_sock *nlk_sk(struct sock *sk)
@@ -73,6 +75,5 @@ struct netlink_table {
 
 extern struct netlink_table *nl_table;
 extern rwlock_t nl_table_lock;
-extern struct mutex nl_sk_hash_lock;
 
 #endif
index de8c74a3c0615ac98ee13ab403491afd8fcac3eb..3ee63a3cff3049cba8f837713394a5123bf54708 100644 (file)
@@ -91,7 +91,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
            sk_diag_put_rings_cfg(sk, skb))
                goto out_nlmsg_trim;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 out_nlmsg_trim:
        nlmsg_cancel(skb, nlh);
@@ -103,7 +104,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 {
        struct netlink_table *tbl = &nl_table[protocol];
        struct rhashtable *ht = &tbl->hash;
-       const struct bucket_table *htbl = rht_dereference(ht->tbl, ht);
+       const struct bucket_table *htbl = rht_dereference_rcu(ht->tbl, ht);
        struct net *net = sock_net(skb->sk);
        struct netlink_diag_req *req;
        struct netlink_sock *nlsk;
@@ -113,7 +114,9 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
        req = nlmsg_data(cb->nlh);
 
        for (i = 0; i < htbl->size; i++) {
-               rht_for_each_entry(nlsk, htbl->buckets[i], ht, node) {
+               struct rhash_head *pos;
+
+               rht_for_each_entry_rcu(nlsk, pos, htbl, i, node) {
                        sk = (struct sock *)nlsk;
 
                        if (!net_eq(sock_net(sk), net))
@@ -170,7 +173,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        req = nlmsg_data(cb->nlh);
 
-       mutex_lock(&nl_sk_hash_lock);
+       rcu_read_lock();
        read_lock(&nl_table_lock);
 
        if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
@@ -184,7 +187,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
        } else {
                if (req->sdiag_protocol >= MAX_LINKS) {
                        read_unlock(&nl_table_lock);
-                       mutex_unlock(&nl_sk_hash_lock);
+                       rcu_read_unlock();
                        return -ENOENT;
                }
 
@@ -192,7 +195,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
        }
 
        read_unlock(&nl_table_lock);
-       mutex_unlock(&nl_sk_hash_lock);
+       rcu_read_unlock();
 
        return skb->len;
 }
index 2e11061ef885562d2ff2a098448a7d9c8d1b64ee..2ed5f964772ee44fc693cf18bc6055308820bf78 100644 (file)
@@ -23,6 +23,9 @@
 static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
 static DECLARE_RWSEM(cb_lock);
 
+atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
+
 void genl_lock(void)
 {
        mutex_lock(&genl_mutex);
@@ -435,15 +438,18 @@ int genl_unregister_family(struct genl_family *family)
 
        genl_lock_all();
 
-       genl_unregister_mc_groups(family);
-
        list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
                if (family->id != rc->id || strcmp(rc->name, family->name))
                        continue;
 
+               genl_unregister_mc_groups(family);
+
                list_del(&rc->family_list);
                family->n_ops = 0;
-               genl_unlock_all();
+               up_write(&cb_lock);
+               wait_event(genl_sk_destructing_waitq,
+                          atomic_read(&genl_sk_destructing_cnt) == 0);
+               genl_unlock();
 
                kfree(family->attrbuf);
                genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
@@ -756,7 +762,8 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
                nla_nest_end(skb, nla_grps);
        }
 
-       return genlmsg_end(skb, hdr);
+       genlmsg_end(skb, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(skb, hdr);
@@ -796,7 +803,8 @@ static int ctrl_fill_mcgrp_info(struct genl_family *family,
        nla_nest_end(skb, nest);
        nla_nest_end(skb, nla_grps);
 
-       return genlmsg_end(skb, hdr);
+       genlmsg_end(skb, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(skb, hdr);
@@ -985,7 +993,7 @@ static struct genl_multicast_group genl_ctrl_groups[] = {
 
 static int genl_bind(struct net *net, int group)
 {
-       int i, err = 0;
+       int i, err = -ENOENT;
 
        down_read(&cb_lock);
        for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
@@ -1014,7 +1022,6 @@ static int genl_bind(struct net *net, int group)
 static void genl_unbind(struct net *net, int group)
 {
        int i;
-       bool found = false;
 
        down_read(&cb_lock);
        for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
@@ -1027,14 +1034,11 @@ static void genl_unbind(struct net *net, int group)
 
                                if (f->mcast_unbind)
                                        f->mcast_unbind(net, fam_grp);
-                               found = true;
                                break;
                        }
                }
        }
        up_read(&cb_lock);
-
-       WARN_ON(!found);
 }
 
 static int __net_init genl_pernet_init(struct net *net)
index 819b87702b7039ceed68085139d2820a2d5711c0..cff3f1614ad4557ff178804acb55bacbad328149 100644 (file)
@@ -555,7 +555,6 @@ EXPORT_SYMBOL(nfc_find_se);
 
 int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
 {
-
        struct nfc_se *se;
        int rc;
 
@@ -605,7 +604,6 @@ error:
 
 int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
 {
-
        struct nfc_se *se;
        int rc;
 
@@ -934,6 +932,27 @@ int nfc_remove_se(struct nfc_dev *dev, u32 se_idx)
 }
 EXPORT_SYMBOL(nfc_remove_se);
 
+int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx,
+                      struct nfc_evt_transaction *evt_transaction)
+{
+       int rc;
+
+       pr_debug("transaction: %x\n", se_idx);
+
+       device_lock(&dev->dev);
+
+       if (!evt_transaction) {
+               rc = -EPROTO;
+               goto out;
+       }
+
+       rc = nfc_genl_se_transaction(dev, se_idx, evt_transaction);
+out:
+       device_unlock(&dev->dev);
+       return rc;
+}
+EXPORT_SYMBOL(nfc_se_transaction);
+
 static void nfc_release(struct device *d)
 {
        struct nfc_dev *dev = to_nfc_dev(d);
index 91df487aa0a9cf2a69f368a9211e80efcc06b012..844673cb7c18d7c587897453c3e289d06271a0e2 100644 (file)
@@ -116,23 +116,6 @@ int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
 }
 EXPORT_SYMBOL(nfc_hci_send_event);
 
-int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
-                         const u8 *param, size_t param_len)
-{
-       u8 pipe;
-
-       pr_debug("\n");
-
-       pipe = hdev->gate2pipe[gate];
-       if (pipe == NFC_HCI_INVALID_PIPE)
-               return -EADDRNOTAVAIL;
-
-       return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE,
-                                     response, param, param_len, NULL, NULL,
-                                     0);
-}
-EXPORT_SYMBOL(nfc_hci_send_response);
-
 /*
  * Execute an hci command sent to gate.
  * skb will contain response data if success. skb can be NULL if you are not
@@ -331,7 +314,7 @@ int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
        if (r < 0)
                return r;
 
-       memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+       nfc_hci_reset_pipes(hdev);
 
        return 0;
 }
@@ -345,7 +328,7 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
 
        pr_debug("\n");
 
-       if (hdev->gate2pipe[dest_gate] == NFC_HCI_DO_NOT_CREATE_PIPE)
+       if (pipe == NFC_HCI_DO_NOT_CREATE_PIPE)
                return 0;
 
        if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
@@ -380,6 +363,8 @@ open_pipe:
                return r;
        }
 
+       hdev->pipes[pipe].gate = dest_gate;
+       hdev->pipes[pipe].dest_host = dest_host;
        hdev->gate2pipe[dest_gate] = pipe;
 
        return 0;
index ef50e7716c4a8742840731db126351fee16bc57b..6e061da2258a8bd32f80f585aaaf7e951180b7d3 100644 (file)
@@ -46,6 +46,32 @@ int nfc_hci_result_to_errno(u8 result)
 }
 EXPORT_SYMBOL(nfc_hci_result_to_errno);
 
+void nfc_hci_reset_pipes(struct nfc_hci_dev *hdev)
+{
+       int i = 0;
+
+       for (i = 0; i < NFC_HCI_MAX_PIPES; i++) {
+               hdev->pipes[i].gate = NFC_HCI_INVALID_GATE;
+               hdev->pipes[i].dest_host = NFC_HCI_INVALID_HOST;
+       }
+       memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+}
+EXPORT_SYMBOL(nfc_hci_reset_pipes);
+
+void nfc_hci_reset_pipes_per_host(struct nfc_hci_dev *hdev, u8 host)
+{
+       int i = 0;
+
+       for (i = 0; i < NFC_HCI_MAX_PIPES; i++) {
+               if (hdev->pipes[i].dest_host != host)
+                       continue;
+
+               hdev->pipes[i].gate = NFC_HCI_INVALID_GATE;
+               hdev->pipes[i].dest_host = NFC_HCI_INVALID_HOST;
+       }
+}
+EXPORT_SYMBOL(nfc_hci_reset_pipes_per_host);
+
 static void nfc_hci_msg_tx_work(struct work_struct *work)
 {
        struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
@@ -167,48 +193,69 @@ exit:
 void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                          struct sk_buff *skb)
 {
-       int r = 0;
-       u8 gate = nfc_hci_pipe2gate(hdev, pipe);
-       u8 local_gate, new_pipe;
-       u8 gate_opened = 0x00;
+       u8 gate = hdev->pipes[pipe].gate;
+       u8 status = NFC_HCI_ANY_OK;
+       struct hci_create_pipe_resp *create_info;
+       struct hci_delete_pipe_noti *delete_info;
+       struct hci_all_pipe_cleared_noti *cleared_info;
 
        pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
 
        switch (cmd) {
        case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
                if (skb->len != 5) {
-                       r = -EPROTO;
-                       break;
+                       status = NFC_HCI_ANY_E_NOK;
+                       goto exit;
                }
+               create_info = (struct hci_create_pipe_resp *)skb->data;
 
-               local_gate = skb->data[3];
-               new_pipe = skb->data[4];
-               nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK, NULL, 0);
-
-               /* save the new created pipe and bind with local gate,
+               /* Save the new created pipe and bind with local gate,
                 * the description for skb->data[3] is destination gate id
                 * but since we received this cmd from host controller, we
                 * are the destination and it is our local gate
                 */
-               hdev->gate2pipe[local_gate] = new_pipe;
+               hdev->gate2pipe[create_info->dest_gate] = create_info->pipe;
+               hdev->pipes[create_info->pipe].gate = create_info->dest_gate;
+               hdev->pipes[create_info->pipe].dest_host =
+                                                       create_info->src_host;
                break;
        case NFC_HCI_ANY_OPEN_PIPE:
-               /* if the pipe is already created, we allow remote host to
-                * open it
-                */
-               if (gate != 0xff)
-                       nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK,
-                                             &gate_opened, 1);
+               if (gate == NFC_HCI_INVALID_GATE) {
+                       status = NFC_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+               break;
+       case NFC_HCI_ADM_NOTIFY_PIPE_DELETED:
+               if (skb->len != 1) {
+                       status = NFC_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+               delete_info = (struct hci_delete_pipe_noti *)skb->data;
+
+               hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
+               hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
                break;
        case NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED:
-               nfc_hci_send_response(hdev, gate, NFC_HCI_ANY_OK, NULL, 0);
+               if (skb->len != 1) {
+                       status = NFC_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+               cleared_info = (struct hci_all_pipe_cleared_noti *)skb->data;
+
+               nfc_hci_reset_pipes_per_host(hdev, cleared_info->host);
                break;
        default:
                pr_info("Discarded unknown cmd %x to gate %x\n", cmd, gate);
-               r = -EINVAL;
                break;
        }
 
+       if (hdev->ops->cmd_received)
+               hdev->ops->cmd_received(hdev, pipe, cmd, skb);
+
+exit:
+       nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE,
+                              status, NULL, 0, NULL, NULL, 0);
+
        kfree_skb(skb);
 }
 
@@ -330,15 +377,15 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
                            struct sk_buff *skb)
 {
        int r = 0;
-       u8 gate = nfc_hci_pipe2gate(hdev, pipe);
+       u8 gate = hdev->pipes[pipe].gate;
 
-       if (gate == 0xff) {
+       if (gate == NFC_HCI_INVALID_GATE) {
                pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
                goto exit;
        }
 
        if (hdev->ops->event_received) {
-               r = hdev->ops->event_received(hdev, gate, event, skb);
+               r = hdev->ops->event_received(hdev, pipe, event, skb);
                if (r <= 0)
                        goto exit_noskb;
        }
@@ -573,7 +620,7 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
        if (hdev->ops->close)
                hdev->ops->close(hdev);
 
-       memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+       nfc_hci_reset_pipes(hdev);
 
        return 0;
 }
@@ -932,7 +979,7 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
 
        nfc_set_drvdata(hdev->ndev, hdev);
 
-       memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+       nfc_hci_reset_pipes(hdev);
 
        hdev->quirks = quirks;
 
index c3d2e2c1394cb648ff548f19fb0741794ea55fea..ab4c8e80b1ad57aadf1fa885601d94394517e34d 100644 (file)
@@ -65,6 +65,14 @@ struct hci_create_pipe_resp {
        u8 pipe;
 } __packed;
 
+struct hci_delete_pipe_noti {
+       u8 pipe;
+} __packed;
+
+struct hci_all_pipe_cleared_noti {
+       u8 host;
+} __packed;
+
 #define NFC_HCI_FRAGMENT       0x7f
 
 #define HCP_HEADER(type, instr) ((((type) & 0x03) << 6) | ((instr) & 0x3f))
@@ -77,8 +85,6 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
                           data_exchange_cb_t cb, void *cb_context,
                           unsigned long completion_delay);
 
-u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe);
-
 void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
                            u8 instruction, struct sk_buff *skb);
 
index e9de1514656ecff2ff17253e65edc5c438a1cee0..1fe725d660852bb2422c94d37cc566e793a18464 100644 (file)
@@ -124,17 +124,6 @@ out_skb_err:
        return err;
 }
 
-u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe)
-{
-       int gate;
-
-       for (gate = 0; gate < NFC_HCI_MAX_GATES; gate++)
-               if (hdev->gate2pipe[gate] == pipe)
-                       return gate;
-
-       return 0xff;
-}
-
 /*
  * Receive hcp message for pipe, with type and cmd.
  * skb contains optional message data only.
index 7aeedc43187db0f7fce4727f4aed15c4a6cefe63..7ed8949266cc6930268098645e4cd2586fdfa9d6 100644 (file)
@@ -4,6 +4,6 @@
 
 obj-$(CONFIG_NFC_NCI) += nci.o
 
-nci-objs := core.o data.o lib.o ntf.o rsp.o
+nci-objs := core.o data.o lib.o ntf.o rsp.o hci.o
 
 nci-$(CONFIG_NFC_NCI_SPI) += spi.o
index 51feb5e630082a78d67c8342c0883168df9b2fb2..9575a1892607c5dbf18f5bc04f531ed826dfe812 100644 (file)
 #include <net/nfc/nci_core.h>
 #include <linux/nfc.h>
 
+struct core_conn_create_data {
+       int length;
+       struct nci_core_conn_create_cmd *cmd;
+};
+
 static void nci_cmd_work(struct work_struct *work);
 static void nci_rx_work(struct work_struct *work);
 static void nci_tx_work(struct work_struct *work);
 
+struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
+                                                  int conn_id)
+{
+       struct nci_conn_info *conn_info;
+
+       list_for_each_entry(conn_info, &ndev->conn_info_list, list) {
+               if (conn_info->conn_id == conn_id)
+                       return conn_info;
+       }
+
+       return NULL;
+}
+
 /* ---- NCI requests ---- */
 
 void nci_req_complete(struct nci_dev *ndev, int result)
@@ -109,10 +127,10 @@ static int __nci_request(struct nci_dev *ndev,
        return rc;
 }
 
-static inline int nci_request(struct nci_dev *ndev,
-                             void (*req)(struct nci_dev *ndev,
-                                         unsigned long opt),
-                             unsigned long opt, __u32 timeout)
+inline int nci_request(struct nci_dev *ndev,
+                      void (*req)(struct nci_dev *ndev,
+                                  unsigned long opt),
+                      unsigned long opt, __u32 timeout)
 {
        int rc;
 
@@ -456,6 +474,95 @@ int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val)
 }
 EXPORT_SYMBOL(nci_set_config);
 
+static void nci_nfcee_discover_req(struct nci_dev *ndev, unsigned long opt)
+{
+       struct nci_nfcee_discover_cmd cmd;
+       __u8 action = opt;
+
+       cmd.discovery_action = action;
+
+       nci_send_cmd(ndev, NCI_OP_NFCEE_DISCOVER_CMD, 1, &cmd);
+}
+
+int nci_nfcee_discover(struct nci_dev *ndev, u8 action)
+{
+       return nci_request(ndev, nci_nfcee_discover_req, action,
+                               msecs_to_jiffies(NCI_CMD_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_nfcee_discover);
+
+static void nci_nfcee_mode_set_req(struct nci_dev *ndev, unsigned long opt)
+{
+       struct nci_nfcee_mode_set_cmd *cmd =
+                                       (struct nci_nfcee_mode_set_cmd *)opt;
+
+       nci_send_cmd(ndev, NCI_OP_NFCEE_MODE_SET_CMD,
+                    sizeof(struct nci_nfcee_mode_set_cmd), cmd);
+}
+
+int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode)
+{
+       struct nci_nfcee_mode_set_cmd cmd;
+
+       cmd.nfcee_id = nfcee_id;
+       cmd.nfcee_mode = nfcee_mode;
+
+       return nci_request(ndev, nci_nfcee_mode_set_req, (unsigned long)&cmd,
+                          msecs_to_jiffies(NCI_CMD_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_nfcee_mode_set);
+
+static void nci_core_conn_create_req(struct nci_dev *ndev, unsigned long opt)
+{
+       struct core_conn_create_data *data =
+                                       (struct core_conn_create_data *)opt;
+
+       nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, data->length, data->cmd);
+}
+
+int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
+                        u8 number_destination_params,
+                        size_t params_len,
+                        struct core_conn_create_dest_spec_params *params)
+{
+       int r;
+       struct nci_core_conn_create_cmd *cmd;
+       struct core_conn_create_data data;
+
+       data.length = params_len + sizeof(struct nci_core_conn_create_cmd);
+       cmd = kzalloc(data.length, GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->destination_type = destination_type;
+       cmd->number_destination_params = number_destination_params;
+       memcpy(cmd->params, params, params_len);
+
+       data.cmd = cmd;
+       ndev->cur_id = params->value[DEST_SPEC_PARAMS_ID_INDEX];
+
+       r = __nci_request(ndev, nci_core_conn_create_req,
+                         (unsigned long)&data,
+                         msecs_to_jiffies(NCI_CMD_TIMEOUT));
+       kfree(cmd);
+       return r;
+}
+EXPORT_SYMBOL(nci_core_conn_create);
+
+static void nci_core_conn_close_req(struct nci_dev *ndev, unsigned long opt)
+{
+       __u8 conn_id = opt;
+
+       nci_send_cmd(ndev, NCI_OP_CORE_CONN_CLOSE_CMD, 1, &conn_id);
+}
+
+int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id)
+{
+       return nci_request(ndev, nci_core_conn_close_req, conn_id,
+                               msecs_to_jiffies(NCI_CMD_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_conn_close);
+
 static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
@@ -712,6 +819,11 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;
+       struct nci_conn_info    *conn_info;
+
+       conn_info = ndev->rf_conn_info;
+       if (!conn_info)
+               return -EPROTO;
 
        pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
 
@@ -724,8 +836,8 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
                return -EBUSY;
 
        /* store cb and context to be used on receiving data */
-       ndev->data_exchange_cb = cb;
-       ndev->data_exchange_cb_context = cb_context;
+       conn_info->data_exchange_cb = cb;
+       conn_info->data_exchange_cb_context = cb_context;
 
        rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
        if (rc)
@@ -768,10 +880,16 @@ static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
 
 static int nci_discover_se(struct nfc_dev *nfc_dev)
 {
+       int r;
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       if (ndev->ops->discover_se)
+       if (ndev->ops->discover_se) {
+               r = nci_nfcee_discover(ndev, NCI_NFCEE_DISCOVERY_ACTION_ENABLE);
+               if (r != NCI_STATUS_OK)
+                       return -EPROTO;
+
                return ndev->ops->discover_se(ndev);
+       }
 
        return 0;
 }
@@ -807,7 +925,6 @@ static struct nfc_ops nci_nfc_ops = {
 };
 
 /* ---- Interface to NCI drivers ---- */
-
 /**
  * nci_allocate_device - allocate a new nci device
  *
@@ -842,13 +959,20 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
                                            tx_headroom + NCI_DATA_HDR_SIZE,
                                            tx_tailroom);
        if (!ndev->nfc_dev)
-               goto free_exit;
+               goto free_nci;
+
+       ndev->hci_dev = nci_hci_allocate(ndev);
+       if (!ndev->hci_dev)
+               goto free_nfc;
 
        nfc_set_drvdata(ndev->nfc_dev, ndev);
 
        return ndev;
 
-free_exit:
+free_nfc:
+       kfree(ndev->nfc_dev);
+
+free_nci:
        kfree(ndev);
        return NULL;
 }
@@ -913,6 +1037,7 @@ int nci_register_device(struct nci_dev *ndev)
                    (unsigned long) ndev);
 
        mutex_init(&ndev->req_lock);
+       INIT_LIST_HEAD(&ndev->conn_info_list);
 
        rc = nfc_register_device(ndev->nfc_dev);
        if (rc)
@@ -938,12 +1063,19 @@ EXPORT_SYMBOL(nci_register_device);
  */
 void nci_unregister_device(struct nci_dev *ndev)
 {
+       struct nci_conn_info    *conn_info, *n;
+
        nci_close_device(ndev);
 
        destroy_workqueue(ndev->cmd_wq);
        destroy_workqueue(ndev->rx_wq);
        destroy_workqueue(ndev->tx_wq);
 
+       list_for_each_entry_safe(conn_info, n, &ndev->conn_info_list, list) {
+               list_del(&conn_info->list);
+               /* conn_info is allocated with devm_kzalloc */
+       }
+
        nfc_unregister_device(ndev->nfc_dev);
 }
 EXPORT_SYMBOL(nci_unregister_device);
@@ -1027,20 +1159,25 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
 static void nci_tx_work(struct work_struct *work)
 {
        struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
+       struct nci_conn_info    *conn_info;
        struct sk_buff *skb;
 
-       pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
+       conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id);
+       if (!conn_info)
+               return;
+
+       pr_debug("credits_cnt %d\n", atomic_read(&conn_info->credits_cnt));
 
        /* Send queued tx data */
-       while (atomic_read(&ndev->credits_cnt)) {
+       while (atomic_read(&conn_info->credits_cnt)) {
                skb = skb_dequeue(&ndev->tx_q);
                if (!skb)
                        return;
 
                /* Check if data flow control is used */
-               if (atomic_read(&ndev->credits_cnt) !=
+               if (atomic_read(&conn_info->credits_cnt) !=
                    NCI_DATA_FLOW_CONTROL_NOT_USED)
-                       atomic_dec(&ndev->credits_cnt);
+                       atomic_dec(&conn_info->credits_cnt);
 
                pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
                         nci_pbf(skb->data),
@@ -1092,7 +1229,9 @@ static void nci_rx_work(struct work_struct *work)
        if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
                /* complete the data exchange transaction, if exists */
                if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
-                       nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);
+                       nci_data_exchange_complete(ndev, NULL,
+                                                  ndev->cur_conn_id,
+                                                  -ETIMEDOUT);
 
                clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
        }
index a2de2a8cb00e2db62b46dc237f3538bc574b18b1..566466d90048d92e3ac7c31fa73c32d398089126 100644 (file)
 
 /* Complete data exchange transaction and forward skb to nfc core */
 void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
-                               int err)
+                               __u8 conn_id, int err)
 {
-       data_exchange_cb_t cb = ndev->data_exchange_cb;
-       void *cb_context = ndev->data_exchange_cb_context;
+       struct nci_conn_info    *conn_info;
+       data_exchange_cb_t cb;
+       void *cb_context;
+
+       conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+       if (!conn_info) {
+               kfree_skb(skb);
+               goto exit;
+       }
+
+       cb = conn_info->data_exchange_cb;
+       cb_context = conn_info->data_exchange_cb_context;
 
        pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
 
@@ -48,9 +58,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
        clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
 
        if (cb) {
-               ndev->data_exchange_cb = NULL;
-               ndev->data_exchange_cb_context = NULL;
-
                /* forward skb to nfc core */
                cb(cb_context, skb, err);
        } else if (skb) {
@@ -60,6 +67,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
                kfree_skb(skb);
        }
 
+exit:
        clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
 }
 
@@ -85,6 +93,7 @@ static inline void nci_push_data_hdr(struct nci_dev *ndev,
 static int nci_queue_tx_data_frags(struct nci_dev *ndev,
                                   __u8 conn_id,
                                   struct sk_buff *skb) {
+       struct nci_conn_info    *conn_info;
        int total_len = skb->len;
        unsigned char *data = skb->data;
        unsigned long flags;
@@ -95,11 +104,17 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
 
        pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len);
 
+       conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+       if (!conn_info) {
+               rc = -EPROTO;
+               goto free_exit;
+       }
+
        __skb_queue_head_init(&frags_q);
 
        while (total_len) {
                frag_len =
-                       min_t(int, total_len, ndev->max_data_pkt_payload_size);
+                       min_t(int, total_len, conn_info->max_pkt_payload_len);
 
                skb_frag = nci_skb_alloc(ndev,
                                         (NCI_DATA_HDR_SIZE + frag_len),
@@ -151,12 +166,19 @@ exit:
 /* Send NCI data */
 int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
 {
+       struct nci_conn_info    *conn_info;
        int rc = 0;
 
        pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len);
 
+       conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+       if (!conn_info) {
+               rc = -EPROTO;
+               goto free_exit;
+       }
+
        /* check if the packet need to be fragmented */
-       if (skb->len <= ndev->max_data_pkt_payload_size) {
+       if (skb->len <= conn_info->max_pkt_payload_len) {
                /* no need to fragment packet */
                nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
 
@@ -170,6 +192,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
                }
        }
 
+       ndev->cur_conn_id = conn_id;
        queue_work(ndev->tx_wq, &ndev->tx_work);
 
        goto exit;
@@ -185,7 +208,7 @@ exit:
 
 static void nci_add_rx_data_frag(struct nci_dev *ndev,
                                 struct sk_buff *skb,
-                                __u8 pbf, __u8 status)
+                                __u8 pbf, __u8 conn_id, __u8 status)
 {
        int reassembly_len;
        int err = 0;
@@ -229,16 +252,13 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
        }
 
 exit:
-       if (ndev->nfc_dev->rf_mode == NFC_RF_INITIATOR) {
-               nci_data_exchange_complete(ndev, skb, err);
-       } else if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) {
+       if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) {
                /* Data received in Target mode, forward to nfc core */
                err = nfc_tm_data_received(ndev->nfc_dev, skb);
                if (err)
                        pr_err("unable to handle received data\n");
        } else {
-               pr_err("rf mode unknown\n");
-               kfree_skb(skb);
+               nci_data_exchange_complete(ndev, skb, conn_id, err);
        }
 }
 
@@ -247,6 +267,8 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        __u8 pbf = nci_pbf(skb->data);
        __u8 status = 0;
+       __u8 conn_id = nci_conn_id(skb->data);
+       struct nci_conn_info    *conn_info;
 
        pr_debug("len %d\n", skb->len);
 
@@ -255,6 +277,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
                 nci_conn_id(skb->data),
                 nci_plen(skb->data));
 
+       conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data));
+       if (!conn_info)
+               return;
+
        /* strip the nci data header */
        skb_pull(skb, NCI_DATA_HDR_SIZE);
 
@@ -268,5 +294,5 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
                skb_trim(skb, (skb->len - 1));
        }
 
-       nci_add_rx_data_frag(ndev, skb, pbf, nci_to_errno(status));
+       nci_add_rx_data_frag(ndev, skb, pbf, conn_id, nci_to_errno(status));
 }
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
new file mode 100644 (file)
index 0000000..ed54ec5
--- /dev/null
@@ -0,0 +1,694 @@
+/*
+ *  The NFC Controller Interface is the communication protocol between an
+ *  NFC Controller (NFCC) and a Device Host (DH).
+ *  This is the HCI over NCI implementation, as specified in the 10.2
+ *  section of the NCI 1.1 specification.
+ *
+ *  Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
+
+struct nci_data {
+       u8              conn_id;
+       u8              pipe;
+       u8              cmd;
+       const u8        *data;
+       u32             data_len;
+} __packed;
+
+struct nci_hci_create_pipe_params {
+       u8 src_gate;
+       u8 dest_host;
+       u8 dest_gate;
+} __packed;
+
+struct nci_hci_create_pipe_resp {
+       u8 src_host;
+       u8 src_gate;
+       u8 dest_host;
+       u8 dest_gate;
+       u8 pipe;
+} __packed;
+
+struct nci_hci_delete_pipe_noti {
+       u8 pipe;
+} __packed;
+
+struct nci_hci_all_pipe_cleared_noti {
+       u8 host;
+} __packed;
+
+struct nci_hcp_message {
+       u8 header;      /* type -cmd,evt,rsp- + instruction */
+       u8 data[];
+} __packed;
+
+struct nci_hcp_packet {
+       u8 header;      /* cbit+pipe */
+       struct nci_hcp_message message;
+} __packed;
+
+#define NCI_HCI_ANY_SET_PARAMETER  0x01
+#define NCI_HCI_ANY_GET_PARAMETER  0x02
+#define NCI_HCI_ANY_CLOSE_PIPE     0x04
+
+#define NCI_HFP_NO_CHAINING        0x80
+
+#define NCI_NFCEE_ID_HCI                0x80
+
+#define NCI_EVT_HOT_PLUG           0x03
+
+#define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY       0x01
+
+/* HCP headers */
+#define NCI_HCI_HCP_PACKET_HEADER_LEN      1
+#define NCI_HCI_HCP_MESSAGE_HEADER_LEN     1
+#define NCI_HCI_HCP_HEADER_LEN             2
+
+/* HCP types */
+#define NCI_HCI_HCP_COMMAND        0x00
+#define NCI_HCI_HCP_EVENT          0x01
+#define NCI_HCI_HCP_RESPONSE       0x02
+
+#define NCI_HCI_ADM_NOTIFY_PIPE_CREATED     0x12
+#define NCI_HCI_ADM_NOTIFY_PIPE_DELETED     0x13
+#define NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15
+
+#define NCI_HCI_FRAGMENT           0x7f
+#define NCI_HCP_HEADER(type, instr) ((((type) & 0x03) << 6) |\
+                                     ((instr) & 0x3f))
+
+#define NCI_HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6)
+#define NCI_HCP_MSG_GET_CMD(header)  (header & 0x3f)
+#define NCI_HCP_MSG_GET_PIPE(header) (header & 0x7f)
+
+/* HCI core */
+static void nci_hci_reset_pipes(struct nci_hci_dev *hdev)
+{
+       int i;
+
+       for (i = 0; i < NCI_HCI_MAX_PIPES; i++) {
+               hdev->pipes[i].gate = NCI_HCI_INVALID_GATE;
+               hdev->pipes[i].host = NCI_HCI_INVALID_HOST;
+       }
+       memset(hdev->gate2pipe, NCI_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+}
+
+static void nci_hci_reset_pipes_per_host(struct nci_dev *ndev, u8 host)
+{
+       int i;
+
+       for (i = 0; i < NCI_HCI_MAX_PIPES; i++) {
+               if (ndev->hci_dev->pipes[i].host == host) {
+                       ndev->hci_dev->pipes[i].gate = NCI_HCI_INVALID_GATE;
+                       ndev->hci_dev->pipes[i].host = NCI_HCI_INVALID_HOST;
+               }
+       }
+}
+
+/* Fragment HCI data over NCI packet.
+ * NFC Forum NCI 10.2.2 Data Exchange:
+ * The payload of the Data Packets sent on the Logical Connection SHALL be
+ * valid HCP packets, as defined within [ETSI_102622]. Each Data Packet SHALL
+ * contain a single HCP packet. NCI Segmentation and Reassembly SHALL NOT be
+ * applied to Data Messages in either direction. The HCI fragmentation mechanism
+ * is used if required.
+ */
+static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
+                            const u8 data_type, const u8 *data,
+                            size_t data_len)
+{
+       struct nci_conn_info    *conn_info;
+       struct sk_buff *skb;
+       int len, i, r;
+       u8 cb = pipe;
+
+       conn_info = ndev->hci_dev->conn_info;
+       if (!conn_info)
+               return -EPROTO;
+
+       skb = nci_skb_alloc(ndev, 2 + conn_info->max_pkt_payload_len +
+                           NCI_DATA_HDR_SIZE, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_reserve(skb, 2 + NCI_DATA_HDR_SIZE);
+       *skb_push(skb, 1) = data_type;
+
+       i = 0;
+       len = conn_info->max_pkt_payload_len;
+
+       do {
+               /* If last packet add NCI_HFP_NO_CHAINING */
+               if (i + conn_info->max_pkt_payload_len -
+                   (skb->len + 1) >= data_len) {
+                       cb |= NCI_HFP_NO_CHAINING;
+                       len = data_len - i;
+               } else {
+                       len = conn_info->max_pkt_payload_len - skb->len - 1;
+               }
+
+               *skb_push(skb, 1) = cb;
+
+               if (len > 0)
+                       memcpy(skb_put(skb, len), data + i, len);
+
+               r = nci_send_data(ndev, conn_info->conn_id, skb);
+               if (r < 0)
+                       return r;
+
+               i += len;
+               if (i < data_len) {
+                       skb_trim(skb, 0);
+                       skb_pull(skb, len);
+               }
+       } while (i < data_len);
+
+       return i;
+}
+
+static void nci_hci_send_data_req(struct nci_dev *ndev, unsigned long opt)
+{
+       struct nci_data *data = (struct nci_data *)opt;
+
+       nci_hci_send_data(ndev, data->pipe, data->cmd,
+                         data->data, data->data_len);
+}
+
+int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
+                      const u8 *param, size_t param_len)
+{
+       u8 pipe = ndev->hci_dev->gate2pipe[gate];
+
+       if (pipe == NCI_HCI_INVALID_PIPE)
+               return -EADDRNOTAVAIL;
+
+       return nci_hci_send_data(ndev, pipe,
+                       NCI_HCP_HEADER(NCI_HCI_HCP_EVENT, event),
+                       param, param_len);
+}
+EXPORT_SYMBOL(nci_hci_send_event);
+
+int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
+                    const u8 *param, size_t param_len,
+                    struct sk_buff **skb)
+{
+       struct nci_conn_info    *conn_info;
+       struct nci_data data;
+       int r;
+       u8 pipe = ndev->hci_dev->gate2pipe[gate];
+
+       if (pipe == NCI_HCI_INVALID_PIPE)
+               return -EADDRNOTAVAIL;
+
+       conn_info = ndev->hci_dev->conn_info;
+       if (!conn_info)
+               return -EPROTO;
+
+       data.conn_id = conn_info->conn_id;
+       data.pipe = pipe;
+       data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, cmd);
+       data.data = param;
+       data.data_len = param_len;
+
+       r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
+                       msecs_to_jiffies(NCI_DATA_TIMEOUT));
+
+       if (r == NCI_STATUS_OK)
+               *skb = conn_info->rx_skb;
+
+       return r;
+}
+EXPORT_SYMBOL(nci_hci_send_cmd);
+
+static void nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
+                                  u8 event, struct sk_buff *skb)
+{
+       if (ndev->ops->hci_event_received)
+               ndev->ops->hci_event_received(ndev, pipe, event, skb);
+}
+
+static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
+                                u8 cmd, struct sk_buff *skb)
+{
+       u8 gate = ndev->hci_dev->pipes[pipe].gate;
+       u8 status = NCI_HCI_ANY_OK | ~NCI_HCI_FRAGMENT;
+       u8 dest_gate, new_pipe;
+       struct nci_hci_create_pipe_resp *create_info;
+       struct nci_hci_delete_pipe_noti *delete_info;
+       struct nci_hci_all_pipe_cleared_noti *cleared_info;
+
+       pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
+
+       switch (cmd) {
+       case NCI_HCI_ADM_NOTIFY_PIPE_CREATED:
+               if (skb->len != 5) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+               create_info = (struct nci_hci_create_pipe_resp *)skb->data;
+               dest_gate = create_info->dest_gate;
+               new_pipe = create_info->pipe;
+
+               /* Save the new created pipe and bind with local gate,
+                * the description for skb->data[3] is destination gate id
+                * but since we received this cmd from host controller, we
+                * are the destination and it is our local gate
+                */
+               ndev->hci_dev->gate2pipe[dest_gate] = new_pipe;
+               ndev->hci_dev->pipes[new_pipe].gate = dest_gate;
+               ndev->hci_dev->pipes[new_pipe].host =
+                                               create_info->src_host;
+               break;
+       case NCI_HCI_ANY_OPEN_PIPE:
+               /* If the pipe is not created report an error */
+               if (gate == NCI_HCI_INVALID_GATE) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+               break;
+       case NCI_HCI_ADM_NOTIFY_PIPE_DELETED:
+               if (skb->len != 1) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+               delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
+
+               ndev->hci_dev->pipes[delete_info->pipe].gate =
+                                               NCI_HCI_INVALID_GATE;
+               ndev->hci_dev->pipes[delete_info->pipe].host =
+                                               NCI_HCI_INVALID_HOST;
+               break;
+       case NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED:
+               if (skb->len != 1) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+
+               cleared_info =
+                       (struct nci_hci_all_pipe_cleared_noti *)skb->data;
+               nci_hci_reset_pipes_per_host(ndev, cleared_info->host);
+               break;
+       default:
+               pr_debug("Discarded unknown cmd %x to gate %x\n", cmd, gate);
+               break;
+       }
+
+       if (ndev->ops->hci_cmd_received)
+               ndev->ops->hci_cmd_received(ndev, pipe, cmd, skb);
+
+exit:
+       nci_hci_send_data(ndev, pipe, status, NULL, 0);
+
+       kfree_skb(skb);
+}
+
+static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe,
+                                 u8 result, struct sk_buff *skb)
+{
+       struct nci_conn_info    *conn_info;
+       u8 status = result;
+
+       if (result != NCI_HCI_ANY_OK)
+               goto exit;
+
+       conn_info = ndev->hci_dev->conn_info;
+       if (!conn_info) {
+               status = NCI_STATUS_REJECTED;
+               goto exit;
+       }
+
+       conn_info->rx_skb = skb;
+
+exit:
+       nci_req_complete(ndev, status);
+}
+
+/* Receive hcp message for pipe, with type and cmd.
+ * skb contains optional message data only.
+ */
+static void nci_hci_hcp_message_rx(struct nci_dev *ndev, u8 pipe,
+                                  u8 type, u8 instruction, struct sk_buff *skb)
+{
+       switch (type) {
+       case NCI_HCI_HCP_RESPONSE:
+               nci_hci_resp_received(ndev, pipe, instruction, skb);
+               break;
+       case NCI_HCI_HCP_COMMAND:
+               nci_hci_cmd_received(ndev, pipe, instruction, skb);
+               break;
+       case NCI_HCI_HCP_EVENT:
+               nci_hci_event_received(ndev, pipe, instruction, skb);
+               break;
+       default:
+               pr_err("UNKNOWN MSG Type %d, instruction=%d\n",
+                      type, instruction);
+               kfree_skb(skb);
+               break;
+       }
+
+       nci_req_complete(ndev, 0);
+}
+
+static void nci_hci_msg_rx_work(struct work_struct *work)
+{
+       struct nci_hci_dev *hdev =
+               container_of(work, struct nci_hci_dev, msg_rx_work);
+       struct sk_buff *skb;
+       struct nci_hcp_message *message;
+       u8 pipe, type, instruction;
+
+       while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
+               pipe = skb->data[0];
+               skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
+               message = (struct nci_hcp_message *)skb->data;
+               type = NCI_HCP_MSG_GET_TYPE(message->header);
+               instruction = NCI_HCP_MSG_GET_CMD(message->header);
+               skb_pull(skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+
+               nci_hci_hcp_message_rx(hdev->ndev, pipe,
+                                      type, instruction, skb);
+       }
+}
+
+void nci_hci_data_received_cb(void *context,
+                             struct sk_buff *skb, int err)
+{
+       struct nci_dev *ndev = (struct nci_dev *)context;
+       struct nci_hcp_packet *packet;
+       u8 pipe, type, instruction;
+       struct sk_buff *hcp_skb;
+       struct sk_buff *frag_skb;
+       int msg_len;
+
+       pr_debug("\n");
+
+       if (err) {
+               nci_req_complete(ndev, err);
+               return;
+       }
+
+       packet = (struct nci_hcp_packet *)skb->data;
+       if ((packet->header & ~NCI_HCI_FRAGMENT) == 0) {
+               skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
+               return;
+       }
+
+       /* it's the last fragment. Does it need re-aggregation? */
+       if (skb_queue_len(&ndev->hci_dev->rx_hcp_frags)) {
+               pipe = packet->header & NCI_HCI_FRAGMENT;
+               skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
+
+               msg_len = 0;
+               skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) {
+                       msg_len += (frag_skb->len -
+                                   NCI_HCI_HCP_PACKET_HEADER_LEN);
+               }
+
+               hcp_skb = nfc_alloc_recv_skb(NCI_HCI_HCP_PACKET_HEADER_LEN +
+                                            msg_len, GFP_KERNEL);
+               if (!hcp_skb) {
+                       nci_req_complete(ndev, -ENOMEM);
+                       return;
+               }
+
+               *skb_put(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN) = pipe;
+
+               skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) {
+                      msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
+                       memcpy(skb_put(hcp_skb, msg_len), frag_skb->data +
+                              NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len);
+               }
+
+               skb_queue_purge(&ndev->hci_dev->rx_hcp_frags);
+       } else {
+               packet->header &= NCI_HCI_FRAGMENT;
+               hcp_skb = skb;
+       }
+
+       /* if this is a response, dispatch immediately to
+        * unblock waiting cmd context. Otherwise, enqueue to dispatch
+        * in separate context where handler can also execute command.
+        */
+       packet = (struct nci_hcp_packet *)hcp_skb->data;
+       type = NCI_HCP_MSG_GET_TYPE(packet->message.header);
+       if (type == NCI_HCI_HCP_RESPONSE) {
+               pipe = packet->header;
+               instruction = NCI_HCP_MSG_GET_CMD(packet->message.header);
+               skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN +
+                        NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+               nci_hci_hcp_message_rx(ndev, pipe, type, instruction, hcp_skb);
+       } else {
+               skb_queue_tail(&ndev->hci_dev->msg_rx_queue, hcp_skb);
+               schedule_work(&ndev->hci_dev->msg_rx_work);
+       }
+}
+
+int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe)
+{
+       struct nci_data data;
+       struct nci_conn_info    *conn_info;
+
+       conn_info = ndev->hci_dev->conn_info;
+       if (!conn_info)
+               return -EPROTO;
+
+       data.conn_id = conn_info->conn_id;
+       data.pipe = pipe;
+       data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND,
+                                      NCI_HCI_ANY_OPEN_PIPE);
+       data.data = NULL;
+       data.data_len = 0;
+
+       return nci_request(ndev, nci_hci_send_data_req,
+                       (unsigned long)&data,
+                       msecs_to_jiffies(NCI_DATA_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_hci_open_pipe);
+
+int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
+                     const u8 *param, size_t param_len)
+{
+       struct nci_conn_info *conn_info;
+       struct nci_data data;
+       int r;
+       u8 *tmp;
+       u8 pipe = ndev->hci_dev->gate2pipe[gate];
+
+       pr_debug("idx=%d to gate %d\n", idx, gate);
+
+       if (pipe == NCI_HCI_INVALID_PIPE)
+               return -EADDRNOTAVAIL;
+
+       conn_info = ndev->hci_dev->conn_info;
+       if (!conn_info)
+               return -EPROTO;
+
+       tmp = kmalloc(1 + param_len, GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
+       *tmp = idx;
+       memcpy(tmp + 1, param, param_len);
+
+       data.conn_id = conn_info->conn_id;
+       data.pipe = pipe;
+       data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND,
+                                      NCI_HCI_ANY_SET_PARAMETER);
+       data.data = tmp;
+       data.data_len = param_len + 1;
+
+       r = nci_request(ndev, nci_hci_send_data_req,
+                       (unsigned long)&data,
+                       msecs_to_jiffies(NCI_DATA_TIMEOUT));
+
+       kfree(tmp);
+       return r;
+}
+EXPORT_SYMBOL(nci_hci_set_param);
+
+int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
+                     struct sk_buff **skb)
+{
+       struct nci_conn_info    *conn_info;
+       struct nci_data data;
+       int r;
+       u8 pipe = ndev->hci_dev->gate2pipe[gate];
+
+       pr_debug("idx=%d to gate %d\n", idx, gate);
+
+       if (pipe == NCI_HCI_INVALID_PIPE)
+               return -EADDRNOTAVAIL;
+
+       conn_info = ndev->hci_dev->conn_info;
+       if (!conn_info)
+               return -EPROTO;
+
+       data.conn_id = conn_info->conn_id;
+       data.pipe = pipe;
+       data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND,
+                                 NCI_HCI_ANY_GET_PARAMETER);
+       data.data = &idx;
+       data.data_len = 1;
+
+       r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
+                       msecs_to_jiffies(NCI_DATA_TIMEOUT));
+
+       if (r == NCI_STATUS_OK)
+               *skb = conn_info->rx_skb;
+
+       return r;
+}
+EXPORT_SYMBOL(nci_hci_get_param);
+
+int nci_hci_connect_gate(struct nci_dev *ndev,
+                        u8 dest_host, u8 dest_gate, u8 pipe)
+{
+       int r;
+
+       if (pipe == NCI_HCI_DO_NOT_OPEN_PIPE)
+               return 0;
+
+       if (ndev->hci_dev->gate2pipe[dest_gate] != NCI_HCI_INVALID_PIPE)
+               return -EADDRINUSE;
+
+       if (pipe != NCI_HCI_INVALID_PIPE)
+               goto open_pipe;
+
+       switch (dest_gate) {
+       case NCI_HCI_LINK_MGMT_GATE:
+               pipe = NCI_HCI_LINK_MGMT_PIPE;
+       break;
+       case NCI_HCI_ADMIN_GATE:
+               pipe = NCI_HCI_ADMIN_PIPE;
+       break;
+       }
+
+open_pipe:
+       r = nci_hci_open_pipe(ndev, pipe);
+       if (r < 0)
+               return r;
+
+       ndev->hci_dev->pipes[pipe].gate = dest_gate;
+       ndev->hci_dev->pipes[pipe].host = dest_host;
+       ndev->hci_dev->gate2pipe[dest_gate] = pipe;
+
+       return 0;
+}
+EXPORT_SYMBOL(nci_hci_connect_gate);
+
+static int nci_hci_dev_connect_gates(struct nci_dev *ndev,
+                                    u8 gate_count,
+                                    struct nci_hci_gate *gates)
+{
+       int r;
+
+       while (gate_count--) {
+               r = nci_hci_connect_gate(ndev, gates->dest_host,
+                                        gates->gate, gates->pipe);
+               if (r < 0)
+                       return r;
+               gates++;
+       }
+
+       return 0;
+}
+
+int nci_hci_dev_session_init(struct nci_dev *ndev)
+{
+       struct nci_conn_info    *conn_info;
+       struct sk_buff *skb;
+       int r;
+
+       ndev->hci_dev->count_pipes = 0;
+       ndev->hci_dev->expected_pipes = 0;
+
+       conn_info = ndev->hci_dev->conn_info;
+       if (!conn_info)
+               return -EPROTO;
+
+       conn_info->data_exchange_cb = nci_hci_data_received_cb;
+       conn_info->data_exchange_cb_context = ndev;
+
+       nci_hci_reset_pipes(ndev->hci_dev);
+
+       if (ndev->hci_dev->init_data.gates[0].gate != NCI_HCI_ADMIN_GATE)
+               return -EPROTO;
+
+       r = nci_hci_connect_gate(ndev,
+                                ndev->hci_dev->init_data.gates[0].dest_host,
+                                ndev->hci_dev->init_data.gates[0].gate,
+                                ndev->hci_dev->init_data.gates[0].pipe);
+       if (r < 0)
+               goto exit;
+
+       r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE,
+                             NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, &skb);
+       if (r < 0)
+               goto exit;
+
+       if (skb->len &&
+           skb->len == strlen(ndev->hci_dev->init_data.session_id) &&
+           memcmp(ndev->hci_dev->init_data.session_id,
+                  skb->data, skb->len) == 0 &&
+           ndev->ops->hci_load_session) {
+               /* Restore gate<->pipe table from some proprietary location. */
+               r = ndev->ops->hci_load_session(ndev);
+               if (r < 0)
+                       goto exit;
+       } else {
+               r = nci_hci_dev_connect_gates(ndev,
+                                             ndev->hci_dev->init_data.gate_count,
+                                             ndev->hci_dev->init_data.gates);
+               if (r < 0)
+                       goto exit;
+
+               r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
+                                     NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY,
+                                     ndev->hci_dev->init_data.session_id,
+                                     strlen(ndev->hci_dev->init_data.session_id));
+       }
+       if (r == 0)
+               goto exit;
+
+exit:
+       kfree_skb(skb);
+
+       return r;
+}
+EXPORT_SYMBOL(nci_hci_dev_session_init);
+
+struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev)
+{
+       struct nci_hci_dev *hdev;
+
+       hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+       if (!hdev)
+               return NULL;
+
+       skb_queue_head_init(&hdev->rx_hcp_frags);
+       INIT_WORK(&hdev->msg_rx_work, nci_hci_msg_rx_work);
+       skb_queue_head_init(&hdev->msg_rx_queue);
+       hdev->ndev = ndev;
+
+       return hdev;
+}
index 22e453cb787d4d62e3246919f32ae5db14557ef3..3218071072ac698a85e88fdb5c1b7e6d55a6217d 100644 (file)
@@ -43,6 +43,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
                                             struct sk_buff *skb)
 {
        struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
+       struct nci_conn_info    *conn_info;
        int i;
 
        pr_debug("num_entries %d\n", ntf->num_entries);
@@ -59,11 +60,13 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
                         i, ntf->conn_entries[i].conn_id,
                         ntf->conn_entries[i].credits);
 
-               if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
-                       /* found static rf connection */
-                       atomic_add(ntf->conn_entries[i].credits,
-                                  &ndev->credits_cnt);
-               }
+               conn_info = nci_get_conn_info_by_conn_id(ndev,
+                                                        ntf->conn_entries[i].conn_id);
+               if (!conn_info)
+                       return;
+
+               atomic_add(ntf->conn_entries[i].credits,
+                          &conn_info->credits_cnt);
        }
 
        /* trigger the next tx */
@@ -96,7 +99,7 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
 
        /* complete the data exchange transaction, if exists */
        if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
-               nci_data_exchange_complete(ndev, NULL, -EIO);
+               nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO);
 }
 
 static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
@@ -513,6 +516,7 @@ static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev,
 static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
                                             struct sk_buff *skb)
 {
+       struct nci_conn_info    *conn_info;
        struct nci_rf_intf_activated_ntf ntf;
        __u8 *data = skb->data;
        int err = NCI_STATUS_OK;
@@ -537,6 +541,13 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
        pr_debug("rf_tech_specific_params_len %d\n",
                 ntf.rf_tech_specific_params_len);
 
+       /* If this contains a value of 0x00 (NFCEE Direct RF
+        * Interface) then all following parameters SHALL contain a
+        * value of 0 and SHALL be ignored.
+        */
+       if (ntf.rf_interface == NCI_RF_INTERFACE_NFCEE_DIRECT)
+               goto listen;
+
        if (ntf.rf_tech_specific_params_len > 0) {
                switch (ntf.activation_rf_tech_and_mode) {
                case NCI_NFC_A_PASSIVE_POLL_MODE:
@@ -614,11 +625,16 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
 
 exit:
        if (err == NCI_STATUS_OK) {
-               ndev->max_data_pkt_payload_size = ntf.max_data_pkt_payload_size;
-               ndev->initial_num_credits = ntf.initial_num_credits;
+               conn_info = ndev->rf_conn_info;
+               if (!conn_info)
+                       return;
+
+               conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size;
+               conn_info->initial_num_credits = ntf.initial_num_credits;
 
                /* set the available credits to initial value */
-               atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
+               atomic_set(&conn_info->credits_cnt,
+                          conn_info->initial_num_credits);
 
                /* store general bytes to be reported later in dep_link_up */
                if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) {
@@ -643,6 +659,7 @@ exit:
                        nci_req_complete(ndev, err);
                }
        } else {
+listen:
                /* Listen mode */
                atomic_set(&ndev->state, NCI_LISTEN_ACTIVE);
                if (err == NCI_STATUS_OK &&
@@ -661,10 +678,15 @@ exit:
 static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
                                         struct sk_buff *skb)
 {
+       struct nci_conn_info    *conn_info;
        struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
 
        pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
 
+       conn_info = ndev->rf_conn_info;
+       if (!conn_info)
+               return;
+
        /* drop tx data queue */
        skb_queue_purge(&ndev->tx_q);
 
@@ -676,7 +698,8 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
 
        /* complete the data exchange transaction, if exists */
        if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
-               nci_data_exchange_complete(ndev, NULL, -EIO);
+               nci_data_exchange_complete(ndev, NULL, NCI_STATIC_RF_CONN_ID,
+                                          -EIO);
 
        switch (ntf->type) {
        case NCI_DEACTIVATE_TYPE_IDLE_MODE:
@@ -696,6 +719,32 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
        nci_req_complete(ndev, NCI_STATUS_OK);
 }
 
+static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
+                                         struct sk_buff *skb)
+{
+       u8 status = NCI_STATUS_OK;
+       struct nci_nfcee_discover_ntf   *nfcee_ntf =
+                               (struct nci_nfcee_discover_ntf *)skb->data;
+
+       pr_debug("\n");
+
+       /* NFCForum NCI 9.2.1 HCI Network Specific Handling
+        * If the NFCC supports the HCI Network, it SHALL return one,
+        * and only one, NFCEE_DISCOVER_NTF with a Protocol type of
+        * “HCI Access”, even if the HCI Network contains multiple NFCEEs.
+        */
+       ndev->hci_dev->nfcee_id = nfcee_ntf->nfcee_id;
+       ndev->cur_id = nfcee_ntf->nfcee_id;
+
+       nci_req_complete(ndev, status);
+}
+
+static void nci_nfcee_action_ntf_packet(struct nci_dev *ndev,
+                                       struct sk_buff *skb)
+{
+       pr_debug("\n");
+}
+
 void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        __u16 ntf_opcode = nci_opcode(skb->data);
@@ -734,6 +783,14 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
                nci_rf_deactivate_ntf_packet(ndev, skb);
                break;
 
+       case NCI_OP_NFCEE_DISCOVER_NTF:
+               nci_nfcee_discover_ntf_packet(ndev, skb);
+               break;
+
+       case NCI_OP_RF_NFCEE_ACTION_NTF:
+               nci_nfcee_action_ntf_packet(ndev, skb);
+               break;
+
        default:
                pr_err("unknown ntf opcode 0x%x\n", ntf_opcode);
                break;
index 041de51ccdbe103de6665c8f8a9537e194de5478..02486bc2ceea961200169d599fce407922fedcb6 100644 (file)
@@ -140,13 +140,31 @@ static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
 
 static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
+       struct nci_conn_info    *conn_info;
        __u8 status = skb->data[0];
 
        pr_debug("status 0x%x\n", status);
 
-       if (status == NCI_STATUS_OK)
+       if (status == NCI_STATUS_OK) {
                atomic_set(&ndev->state, NCI_DISCOVERY);
 
+               conn_info = ndev->rf_conn_info;
+               if (!conn_info) {
+                       conn_info = devm_kzalloc(&ndev->nfc_dev->dev,
+                                                sizeof(struct nci_conn_info),
+                                                GFP_KERNEL);
+                       if (!conn_info) {
+                               status = NCI_STATUS_REJECTED;
+                               goto exit;
+                       }
+                       conn_info->conn_id = NCI_STATIC_RF_CONN_ID;
+                       INIT_LIST_HEAD(&conn_info->list);
+                       list_add(&conn_info->list, &ndev->conn_info_list);
+                       ndev->rf_conn_info = conn_info;
+               }
+       }
+
+exit:
        nci_req_complete(ndev, status);
 }
 
@@ -178,6 +196,90 @@ static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
        }
 }
 
+static void nci_nfcee_discover_rsp_packet(struct nci_dev *ndev,
+                                         struct sk_buff *skb)
+{
+       struct nci_nfcee_discover_rsp *discover_rsp;
+
+       if (skb->len != 2) {
+               nci_req_complete(ndev, NCI_STATUS_NFCEE_PROTOCOL_ERROR);
+               return;
+       }
+
+       discover_rsp = (struct nci_nfcee_discover_rsp *)skb->data;
+
+       if (discover_rsp->status != NCI_STATUS_OK ||
+           discover_rsp->num_nfcee == 0)
+               nci_req_complete(ndev, discover_rsp->status);
+}
+
+static void nci_nfcee_mode_set_rsp_packet(struct nci_dev *ndev,
+                                         struct sk_buff *skb)
+{
+       __u8 status = skb->data[0];
+
+       pr_debug("status 0x%x\n", status);
+       nci_req_complete(ndev, status);
+}
+
+static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
+                                           struct sk_buff *skb)
+{
+       __u8 status = skb->data[0];
+       struct nci_conn_info *conn_info;
+       struct nci_core_conn_create_rsp *rsp;
+
+       pr_debug("status 0x%x\n", status);
+
+       if (status == NCI_STATUS_OK) {
+               rsp = (struct nci_core_conn_create_rsp *)skb->data;
+
+               conn_info = devm_kzalloc(&ndev->nfc_dev->dev,
+                                        sizeof(*conn_info), GFP_KERNEL);
+               if (!conn_info) {
+                       status = NCI_STATUS_REJECTED;
+                       goto exit;
+               }
+
+               conn_info->id = ndev->cur_id;
+               conn_info->conn_id = rsp->conn_id;
+
+               /* Note: data_exchange_cb and data_exchange_cb_context need to
+                * be specify out of nci_core_conn_create_rsp_packet
+                */
+
+               INIT_LIST_HEAD(&conn_info->list);
+               list_add(&conn_info->list, &ndev->conn_info_list);
+
+               if (ndev->cur_id == ndev->hci_dev->nfcee_id)
+                       ndev->hci_dev->conn_info = conn_info;
+
+               conn_info->conn_id = rsp->conn_id;
+               conn_info->max_pkt_payload_len = rsp->max_ctrl_pkt_payload_len;
+               atomic_set(&conn_info->credits_cnt, rsp->credits_cnt);
+       }
+
+exit:
+       nci_req_complete(ndev, status);
+}
+
+static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
+                                          struct sk_buff *skb)
+{
+       struct nci_conn_info *conn_info;
+       __u8 status = skb->data[0];
+
+       pr_debug("status 0x%x\n", status);
+       if (status == NCI_STATUS_OK) {
+               conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_id);
+               if (conn_info) {
+                       list_del(&conn_info->list);
+                       devm_kfree(&ndev->nfc_dev->dev, conn_info);
+               }
+       }
+       nci_req_complete(ndev, status);
+}
+
 void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        __u16 rsp_opcode = nci_opcode(skb->data);
@@ -207,6 +309,14 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
                nci_core_set_config_rsp_packet(ndev, skb);
                break;
 
+       case NCI_OP_CORE_CONN_CREATE_RSP:
+               nci_core_conn_create_rsp_packet(ndev, skb);
+               break;
+
+       case NCI_OP_CORE_CONN_CLOSE_RSP:
+               nci_core_conn_close_rsp_packet(ndev, skb);
+               break;
+
        case NCI_OP_RF_DISCOVER_MAP_RSP:
                nci_rf_disc_map_rsp_packet(ndev, skb);
                break;
@@ -223,6 +333,14 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
                nci_rf_deactivate_rsp_packet(ndev, skb);
                break;
 
+       case NCI_OP_NFCEE_DISCOVER_RSP:
+               nci_nfcee_discover_rsp_packet(ndev, skb);
+               break;
+
+       case NCI_OP_NFCEE_MODE_SET_RSP:
+               nci_nfcee_mode_set_rsp_packet(ndev, skb);
+               break;
+
        default:
                pr_err("unknown rsp opcode 0x%x\n", rsp_opcode);
                break;
index 44989fc8cddf19550a7a525503f9d267099470b2..14a2d11581da7ededf0eff4ca09293a277853b2d 100644 (file)
@@ -102,7 +102,8 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
                        goto nla_put_failure;
        }
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -496,6 +497,53 @@ free_msg:
        return -EMSGSIZE;
 }
 
+int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx,
+                           struct nfc_evt_transaction *evt_transaction)
+{
+       struct nfc_se *se;
+       struct sk_buff *msg;
+       void *hdr;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+                         NFC_EVENT_SE_TRANSACTION);
+       if (!hdr)
+               goto free_msg;
+
+       se = nfc_find_se(dev, se_idx);
+       if (!se)
+               goto free_msg;
+
+       if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+           nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) ||
+           nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type) ||
+           nla_put(msg, NFC_ATTR_SE_AID, evt_transaction->aid_len,
+                   evt_transaction->aid) ||
+           nla_put(msg, NFC_ATTR_SE_PARAMS, evt_transaction->params_len,
+                   evt_transaction->params))
+               goto nla_put_failure;
+
+       /* evt_transaction is no more used */
+       devm_kfree(&dev->dev, evt_transaction);
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
+
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+free_msg:
+       /* evt_transaction is no more used */
+       devm_kfree(&dev->dev, evt_transaction);
+       nlmsg_free(msg);
+       return -EMSGSIZE;
+}
+
 static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
                                u32 portid, u32 seq,
                                struct netlink_callback *cb,
@@ -518,7 +566,8 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
            nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode))
                goto nla_put_failure;
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -908,7 +957,8 @@ static int nfc_genl_send_params(struct sk_buff *msg,
            nla_put_u16(msg, NFC_ATTR_LLC_PARAM_MIUX, be16_to_cpu(local->miux)))
                goto nla_put_failure;
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
 nla_put_failure:
 
@@ -1247,8 +1297,7 @@ static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev,
                    nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type))
                        goto nla_put_failure;
 
-               if (genlmsg_end(msg, hdr) < 0)
-                       goto nla_put_failure;
+               genlmsg_end(msg, hdr);
        }
 
        return 0;
index 88d60064890e3a9ca93db4c88fdcf840e077fabe..a8ce80b47720a6425c857a660895418b1c00d84f 100644 (file)
@@ -100,6 +100,8 @@ int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list);
 
 int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type);
 int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx);
+int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx,
+                           struct nfc_evt_transaction *evt_transaction);
 
 struct nfc_dev *nfc_get_device(unsigned int idx);
 
index 770064c837112ca23fb1bbecf75b2d84643e88b0..b491c1c296fe8f954872320b7aef4db00dfb6f4a 100644 (file)
@@ -185,10 +185,15 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
        return 0;
 }
 
-static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
-                   const __be32 *mpls_lse)
+/* 'KEY' must not have any bits set outside of the 'MASK' */
+#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
+#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
+
+static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
+                   const __be32 *mpls_lse, const __be32 *mask)
 {
        __be32 *stack;
+       __be32 lse;
        int err;
 
        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
@@ -196,14 +201,16 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                return err;
 
        stack = (__be32 *)skb_mpls_header(skb);
+       lse = MASKED(*stack, *mpls_lse, *mask);
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               __be32 diff[] = { ~(*stack), *mpls_lse };
+               __be32 diff[] = { ~(*stack), lse };
+
                skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                          ~skb->csum);
        }
 
-       *stack = *mpls_lse;
-       key->mpls.top_lse = *mpls_lse;
+       *stack = lse;
+       flow_key->mpls.top_lse = lse;
        return 0;
 }
 
@@ -212,7 +219,7 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
        int err;
 
        err = skb_vlan_pop(skb);
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = 0;
@@ -222,7 +229,7 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
 static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
 {
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = vlan->vlan_tci;
@@ -230,23 +237,39 @@ static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                             ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
 }
 
-static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
-                       const struct ovs_key_ethernet *eth_key)
+/* 'src' is already properly masked. */
+static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
+{
+       u16 *dst = (u16 *)dst_;
+       const u16 *src = (const u16 *)src_;
+       const u16 *mask = (const u16 *)mask_;
+
+       SET_MASKED(dst[0], src[0], mask[0]);
+       SET_MASKED(dst[1], src[1], mask[1]);
+       SET_MASKED(dst[2], src[2], mask[2]);
+}
+
+static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
+                       const struct ovs_key_ethernet *key,
+                       const struct ovs_key_ethernet *mask)
 {
        int err;
+
        err = skb_ensure_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;
 
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
-       ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
-       ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);
+       ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
+                              mask->eth_src);
+       ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
+                              mask->eth_dst);
 
        ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
-       ether_addr_copy(key->eth.src, eth_key->eth_src);
-       ether_addr_copy(key->eth.dst, eth_key->eth_dst);
+       ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
+       ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
        return 0;
 }
 
@@ -304,6 +327,15 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
        }
 }
 
+static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
+                          const __be32 mask[4], __be32 masked[4])
+{
+       masked[0] = MASKED(old[0], addr[0], mask[0]);
+       masked[1] = MASKED(old[1], addr[1], mask[1]);
+       masked[2] = MASKED(old[2], addr[2], mask[2]);
+       masked[3] = MASKED(old[3], addr[3], mask[3]);
+}
+
 static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
@@ -315,29 +347,29 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
        memcpy(addr, new_addr, sizeof(__be32[4]));
 }
 
-static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
+static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
 {
-       nh->priority = tc >> 4;
-       nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
+       /* Bits 21-24 are always unmasked, so this retains their values. */
+       SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+       SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+       SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
 }
 
-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
+static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
+                      u8 mask)
 {
-       nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
-       nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
-       nh->flow_lbl[2] = fl & 0x000000FF;
-}
+       new_ttl = MASKED(nh->ttl, new_ttl, mask);
 
-static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
-{
        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
 }
 
-static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
-                   const struct ovs_key_ipv4 *ipv4_key)
+static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
+                   const struct ovs_key_ipv4 *key,
+                   const struct ovs_key_ipv4 *mask)
 {
        struct iphdr *nh;
+       __be32 new_addr;
        int err;
 
        err = skb_ensure_writable(skb, skb_network_offset(skb) +
@@ -347,36 +379,49 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
 
        nh = ip_hdr(skb);
 
-       if (ipv4_key->ipv4_src != nh->saddr) {
-               set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
-               key->ipv4.addr.src = ipv4_key->ipv4_src;
-       }
+       /* Setting an IP addresses is typically only a side effect of
+        * matching on them in the current userspace implementation, so it
+        * makes sense to check if the value actually changed.
+        */
+       if (mask->ipv4_src) {
+               new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
 
-       if (ipv4_key->ipv4_dst != nh->daddr) {
-               set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
-               key->ipv4.addr.dst = ipv4_key->ipv4_dst;
+               if (unlikely(new_addr != nh->saddr)) {
+                       set_ip_addr(skb, nh, &nh->saddr, new_addr);
+                       flow_key->ipv4.addr.src = new_addr;
+               }
        }
+       if (mask->ipv4_dst) {
+               new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
 
-       if (ipv4_key->ipv4_tos != nh->tos) {
-               ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
-               key->ip.tos = nh->tos;
+               if (unlikely(new_addr != nh->daddr)) {
+                       set_ip_addr(skb, nh, &nh->daddr, new_addr);
+                       flow_key->ipv4.addr.dst = new_addr;
+               }
        }
-
-       if (ipv4_key->ipv4_ttl != nh->ttl) {
-               set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
-               key->ip.ttl = ipv4_key->ipv4_ttl;
+       if (mask->ipv4_tos) {
+               ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
+               flow_key->ip.tos = nh->tos;
+       }
+       if (mask->ipv4_ttl) {
+               set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
+               flow_key->ip.ttl = nh->ttl;
        }
 
        return 0;
 }
 
-static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
-                   const struct ovs_key_ipv6 *ipv6_key)
+static bool is_ipv6_mask_nonzero(const __be32 addr[4])
+{
+       return !!(addr[0] | addr[1] | addr[2] | addr[3]);
+}
+
+static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+                   const struct ovs_key_ipv6 *key,
+                   const struct ovs_key_ipv6 *mask)
 {
        struct ipv6hdr *nh;
        int err;
-       __be32 *saddr;
-       __be32 *daddr;
 
        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct ipv6hdr));
@@ -384,71 +429,77 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
                return err;
 
        nh = ipv6_hdr(skb);
-       saddr = (__be32 *)&nh->saddr;
-       daddr = (__be32 *)&nh->daddr;
-
-       if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
-               set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
-                             ipv6_key->ipv6_src, true);
-               memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
-                      sizeof(ipv6_key->ipv6_src));
-       }
 
-       if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
+       /* Setting an IP addresses is typically only a side effect of
+        * matching on them in the current userspace implementation, so it
+        * makes sense to check if the value actually changed.
+        */
+       if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
+               __be32 *saddr = (__be32 *)&nh->saddr;
+               __be32 masked[4];
+
+               mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
+
+               if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
+                       set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+                                     true);
+                       memcpy(&flow_key->ipv6.addr.src, masked,
+                              sizeof(flow_key->ipv6.addr.src));
+               }
+       }
+       if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;
-
-               if (ipv6_ext_hdr(nh->nexthdr))
-                       recalc_csum = ipv6_find_hdr(skb, &offset,
-                                                   NEXTHDR_ROUTING, NULL,
-                                                   &flags) != NEXTHDR_ROUTING;
-
-               set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
-                             ipv6_key->ipv6_dst, recalc_csum);
-               memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
-                      sizeof(ipv6_key->ipv6_dst));
+               __be32 *daddr = (__be32 *)&nh->daddr;
+               __be32 masked[4];
+
+               mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
+
+               if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
+                       if (ipv6_ext_hdr(nh->nexthdr))
+                               recalc_csum = (ipv6_find_hdr(skb, &offset,
+                                                            NEXTHDR_ROUTING,
+                                                            NULL, &flags)
+                                              != NEXTHDR_ROUTING);
+
+                       set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+                                     recalc_csum);
+                       memcpy(&flow_key->ipv6.addr.dst, masked,
+                              sizeof(flow_key->ipv6.addr.dst));
+               }
+       }
+       if (mask->ipv6_tclass) {
+               ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
+               flow_key->ip.tos = ipv6_get_dsfield(nh);
+       }
+       if (mask->ipv6_label) {
+               set_ipv6_fl(nh, ntohl(key->ipv6_label),
+                           ntohl(mask->ipv6_label));
+               flow_key->ipv6.label =
+                   *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+       }
+       if (mask->ipv6_hlimit) {
+               SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+               flow_key->ip.ttl = nh->hop_limit;
        }
-
-       set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
-       key->ip.tos = ipv6_get_dsfield(nh);
-
-       set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
-       key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
-
-       nh->hop_limit = ipv6_key->ipv6_hlimit;
-       key->ip.ttl = ipv6_key->ipv6_hlimit;
        return 0;
 }
 
 /* Must follow skb_ensure_writable() since that can move the skb data. */
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
-                        __be16 new_port, __sum16 *check)
+                       __be16 new_port, __sum16 *check)
 {
        inet_proto_csum_replace2(check, skb, *port, new_port, 0);
        *port = new_port;
-       skb_clear_hash(skb);
-}
-
-static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
-{
-       struct udphdr *uh = udp_hdr(skb);
-
-       if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
-               set_tp_port(skb, port, new_port, &uh->check);
-
-               if (!uh->check)
-                       uh->check = CSUM_MANGLED_0;
-       } else {
-               *port = new_port;
-               skb_clear_hash(skb);
-       }
 }
 
-static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
-                  const struct ovs_key_udp *udp_port_key)
+static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+                  const struct ovs_key_udp *key,
+                  const struct ovs_key_udp *mask)
 {
        struct udphdr *uh;
+       __be16 src, dst;
        int err;
 
        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
@@ -457,23 +508,40 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
                return err;
 
        uh = udp_hdr(skb);
-       if (udp_port_key->udp_src != uh->source) {
-               set_udp_port(skb, &uh->source, udp_port_key->udp_src);
-               key->tp.src = udp_port_key->udp_src;
-       }
+       /* Either of the masks is non-zero, so do not bother checking them. */
+       src = MASKED(uh->source, key->udp_src, mask->udp_src);
+       dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);
 
-       if (udp_port_key->udp_dst != uh->dest) {
-               set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
-               key->tp.dst = udp_port_key->udp_dst;
+       if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+               if (likely(src != uh->source)) {
+                       set_tp_port(skb, &uh->source, src, &uh->check);
+                       flow_key->tp.src = src;
+               }
+               if (likely(dst != uh->dest)) {
+                       set_tp_port(skb, &uh->dest, dst, &uh->check);
+                       flow_key->tp.dst = dst;
+               }
+
+               if (unlikely(!uh->check))
+                       uh->check = CSUM_MANGLED_0;
+       } else {
+               uh->source = src;
+               uh->dest = dst;
+               flow_key->tp.src = src;
+               flow_key->tp.dst = dst;
        }
 
+       skb_clear_hash(skb);
+
        return 0;
 }
 
-static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
-                  const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+                  const struct ovs_key_tcp *key,
+                  const struct ovs_key_tcp *mask)
 {
        struct tcphdr *th;
+       __be16 src, dst;
        int err;
 
        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
@@ -482,50 +550,49 @@ static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
                return err;
 
        th = tcp_hdr(skb);
-       if (tcp_port_key->tcp_src != th->source) {
-               set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
-               key->tp.src = tcp_port_key->tcp_src;
+       src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+       if (likely(src != th->source)) {
+               set_tp_port(skb, &th->source, src, &th->check);
+               flow_key->tp.src = src;
        }
-
-       if (tcp_port_key->tcp_dst != th->dest) {
-               set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
-               key->tp.dst = tcp_port_key->tcp_dst;
+       dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+       if (likely(dst != th->dest)) {
+               set_tp_port(skb, &th->dest, dst, &th->check);
+               flow_key->tp.dst = dst;
        }
+       skb_clear_hash(skb);
 
        return 0;
 }
 
-static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
-                   const struct ovs_key_sctp *sctp_port_key)
+static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+                   const struct ovs_key_sctp *key,
+                   const struct ovs_key_sctp *mask)
 {
+       unsigned int sctphoff = skb_transport_offset(skb);
        struct sctphdr *sh;
+       __le32 old_correct_csum, new_csum, old_csum;
        int err;
-       unsigned int sctphoff = skb_transport_offset(skb);
 
        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;
 
        sh = sctp_hdr(skb);
-       if (sctp_port_key->sctp_src != sh->source ||
-           sctp_port_key->sctp_dst != sh->dest) {
-               __le32 old_correct_csum, new_csum, old_csum;
+       old_csum = sh->checksum;
+       old_correct_csum = sctp_compute_cksum(skb, sctphoff);
 
-               old_csum = sh->checksum;
-               old_correct_csum = sctp_compute_cksum(skb, sctphoff);
+       sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
+       sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
 
-               sh->source = sctp_port_key->sctp_src;
-               sh->dest = sctp_port_key->sctp_dst;
+       new_csum = sctp_compute_cksum(skb, sctphoff);
 
-               new_csum = sctp_compute_cksum(skb, sctphoff);
+       /* Carry any checksum errors through. */
+       sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
 
-               /* Carry any checksum errors through. */
-               sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
-
-               skb_clear_hash(skb);
-               key->tp.src = sctp_port_key->sctp_src;
-               key->tp.dst = sctp_port_key->sctp_dst;
-       }
+       skb_clear_hash(skb);
+       flow_key->tp.src = sh->source;
+       flow_key->tp.dst = sh->dest;
 
        return 0;
 }
@@ -653,52 +720,77 @@ static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
        key->ovs_flow_hash = hash;
 }
 
-static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
-                             const struct nlattr *nested_attr)
+static int execute_set_action(struct sk_buff *skb,
+                             struct sw_flow_key *flow_key,
+                             const struct nlattr *a)
+{
+       /* Only tunnel set execution is supported without a mask. */
+       if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
+               OVS_CB(skb)->egress_tun_info = nla_data(a);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+/* Mask is at the midpoint of the data. */
+#define get_mask(a, type) ((const type)nla_data(a) + 1)
+
+static int execute_masked_set_action(struct sk_buff *skb,
+                                    struct sw_flow_key *flow_key,
+                                    const struct nlattr *a)
 {
        int err = 0;
 
-       switch (nla_type(nested_attr)) {
+       switch (nla_type(a)) {
        case OVS_KEY_ATTR_PRIORITY:
-               skb->priority = nla_get_u32(nested_attr);
-               key->phy.priority = skb->priority;
+               SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+               flow_key->phy.priority = skb->priority;
                break;
 
        case OVS_KEY_ATTR_SKB_MARK:
-               skb->mark = nla_get_u32(nested_attr);
-               key->phy.skb_mark = skb->mark;
+               SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+               flow_key->phy.skb_mark = skb->mark;
                break;
 
        case OVS_KEY_ATTR_TUNNEL_INFO:
-               OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
+               /* Masked data not supported for tunnel. */
+               err = -EINVAL;
                break;
 
        case OVS_KEY_ATTR_ETHERNET:
-               err = set_eth_addr(skb, key, nla_data(nested_attr));
+               err = set_eth_addr(skb, flow_key, nla_data(a),
+                                  get_mask(a, struct ovs_key_ethernet *));
                break;
 
        case OVS_KEY_ATTR_IPV4:
-               err = set_ipv4(skb, key, nla_data(nested_attr));
+               err = set_ipv4(skb, flow_key, nla_data(a),
+                              get_mask(a, struct ovs_key_ipv4 *));
                break;
 
        case OVS_KEY_ATTR_IPV6:
-               err = set_ipv6(skb, key, nla_data(nested_attr));
+               err = set_ipv6(skb, flow_key, nla_data(a),
+                              get_mask(a, struct ovs_key_ipv6 *));
                break;
 
        case OVS_KEY_ATTR_TCP:
-               err = set_tcp(skb, key, nla_data(nested_attr));
+               err = set_tcp(skb, flow_key, nla_data(a),
+                             get_mask(a, struct ovs_key_tcp *));
                break;
 
        case OVS_KEY_ATTR_UDP:
-               err = set_udp(skb, key, nla_data(nested_attr));
+               err = set_udp(skb, flow_key, nla_data(a),
+                             get_mask(a, struct ovs_key_udp *));
                break;
 
        case OVS_KEY_ATTR_SCTP:
-               err = set_sctp(skb, key, nla_data(nested_attr));
+               err = set_sctp(skb, flow_key, nla_data(a),
+                              get_mask(a, struct ovs_key_sctp *));
                break;
 
        case OVS_KEY_ATTR_MPLS:
-               err = set_mpls(skb, key, nla_data(nested_attr));
+               err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
+                                                                   __be32 *));
                break;
        }
 
@@ -818,6 +910,11 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        err = execute_set_action(skb, key, nla_data(a));
                        break;
 
+               case OVS_ACTION_ATTR_SET_MASKED:
+               case OVS_ACTION_ATTR_SET_TO_MASKED:
+                       err = execute_masked_set_action(skb, key, nla_data(a));
+                       break;
+
                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, key, a);
                        break;
index 4e9a5f035cbcf144998ae7c22c90151429cf7031..ae5e77cdc0ca1f34ff7f9c99d65ba0c8bda9ace6 100644 (file)
@@ -65,6 +65,8 @@ static struct genl_family dp_packet_genl_family;
 static struct genl_family dp_flow_genl_family;
 static struct genl_family dp_datapath_genl_family;
 
+static const struct nla_policy flow_policy[];
+
 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP,
 };
@@ -419,7 +421,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
        if (!dp_ifindex)
                return -ENODEV;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (skb_vlan_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;
@@ -461,10 +463,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;
 
-       nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
-       err = ovs_nla_put_flow(key, key, user_skb);
+       err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
        BUG_ON(err);
-       nla_nest_end(user_skb, nla);
 
        if (upcall_info->userdata)
                __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
@@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        struct vport *input_vport;
        int len;
        int err;
-       bool log = !a[OVS_FLOW_ATTR_PROBE];
+       bool log = !a[OVS_PACKET_ATTR_PROBE];
 
        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+       [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
 };
 
 static const struct genl_ops dp_packet_genl_ops[] = {
@@ -663,46 +664,48 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
        }
 }
 
-static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
+static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
 {
-       return NLMSG_ALIGN(sizeof(struct ovs_header))
-               + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
-               + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
-               + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
-               + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
-               + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
-               + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
+       return ovs_identifier_is_ufid(sfid) &&
+              !(ufid_flags & OVS_UFID_F_OMIT_KEY);
 }
 
-/* Called with ovs_mutex or RCU read lock. */
-static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
-                                  struct sk_buff *skb)
+static bool should_fill_mask(uint32_t ufid_flags)
 {
-       struct nlattr *nla;
-       int err;
+       return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
+}
 
-       /* Fill flow key. */
-       nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
-       if (!nla)
-               return -EMSGSIZE;
+static bool should_fill_actions(uint32_t ufid_flags)
+{
+       return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
+}
 
-       err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
-       if (err)
-               return err;
+static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
+                                   const struct sw_flow_id *sfid,
+                                   uint32_t ufid_flags)
+{
+       size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
 
-       nla_nest_end(skb, nla);
+       /* OVS_FLOW_ATTR_UFID */
+       if (sfid && ovs_identifier_is_ufid(sfid))
+               len += nla_total_size(sfid->ufid_len);
 
-       /* Fill flow mask. */
-       nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
-       if (!nla)
-               return -EMSGSIZE;
+       /* OVS_FLOW_ATTR_KEY */
+       if (!sfid || should_fill_key(sfid, ufid_flags))
+               len += nla_total_size(ovs_key_attr_size());
 
-       err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
-       if (err)
-               return err;
+       /* OVS_FLOW_ATTR_MASK */
+       if (should_fill_mask(ufid_flags))
+               len += nla_total_size(ovs_key_attr_size());
 
-       nla_nest_end(skb, nla);
-       return 0;
+       /* OVS_FLOW_ATTR_ACTIONS */
+       if (should_fill_actions(ufid_flags))
+               len += nla_total_size(acts->actions_len);
+
+       return len
+               + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
+               + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
+               + nla_total_size(8); /* OVS_FLOW_ATTR_USED */
 }
 
 /* Called with ovs_mutex or RCU read lock. */
@@ -773,7 +776,7 @@ static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
 /* Called with ovs_mutex or RCU read lock. */
 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
                                  struct sk_buff *skb, u32 portid,
-                                 u32 seq, u32 flags, u8 cmd)
+                                 u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
 {
        const int skb_orig_len = skb->len;
        struct ovs_header *ovs_header;
@@ -786,19 +789,34 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
 
        ovs_header->dp_ifindex = dp_ifindex;
 
-       err = ovs_flow_cmd_fill_match(flow, skb);
+       err = ovs_nla_put_identifier(flow, skb);
        if (err)
                goto error;
 
+       if (should_fill_key(&flow->id, ufid_flags)) {
+               err = ovs_nla_put_masked_key(flow, skb);
+               if (err)
+                       goto error;
+       }
+
+       if (should_fill_mask(ufid_flags)) {
+               err = ovs_nla_put_mask(flow, skb);
+               if (err)
+                       goto error;
+       }
+
        err = ovs_flow_cmd_fill_stats(flow, skb);
        if (err)
                goto error;
 
-       err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
-       if (err)
-               goto error;
+       if (should_fill_actions(ufid_flags)) {
+               err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
+               if (err)
+                       goto error;
+       }
 
-       return genlmsg_end(skb, ovs_header);
+       genlmsg_end(skb, ovs_header);
+       return 0;
 
 error:
        genlmsg_cancel(skb, ovs_header);
@@ -807,15 +825,19 @@ error:
 
 /* May not be called with RCU read lock. */
 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
+                                              const struct sw_flow_id *sfid,
                                               struct genl_info *info,
-                                              bool always)
+                                              bool always,
+                                              uint32_t ufid_flags)
 {
        struct sk_buff *skb;
+       size_t len;
 
        if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
                return NULL;
 
-       skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
+       len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
+       skb = genlmsg_new_unicast(len, info, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
@@ -826,19 +848,19 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act
 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
                                               int dp_ifindex,
                                               struct genl_info *info, u8 cmd,
-                                              bool always)
+                                              bool always, u32 ufid_flags)
 {
        struct sk_buff *skb;
        int retval;
 
-       skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
-                                     always);
+       skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
+                                     &flow->id, info, always, ufid_flags);
        if (IS_ERR_OR_NULL(skb))
                return skb;
 
        retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
                                        info->snd_portid, info->snd_seq, 0,
-                                       cmd);
+                                       cmd, ufid_flags);
        BUG_ON(retval < 0);
        return skb;
 }
@@ -847,12 +869,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
 {
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
-       struct sw_flow *flow, *new_flow;
+       struct sw_flow *flow = NULL, *new_flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply;
        struct datapath *dp;
+       struct sw_flow_key key;
        struct sw_flow_actions *acts;
        struct sw_flow_match match;
+       u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int error;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
 
@@ -877,13 +901,19 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
        }
 
        /* Extract key. */
-       ovs_match_init(&match, &new_flow->unmasked_key, &mask);
+       ovs_match_init(&match, &key, &mask);
        error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
                                  a[OVS_FLOW_ATTR_MASK], log);
        if (error)
                goto err_kfree_flow;
 
-       ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
+       ovs_flow_mask_key(&new_flow->key, &key, &mask);
+
+       /* Extract flow identifier. */
+       error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
+                                      &key, log);
+       if (error)
+               goto err_kfree_flow;
 
        /* Validate actions. */
        error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
@@ -893,7 +923,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                goto err_kfree_flow;
        }
 
-       reply = ovs_flow_cmd_alloc_info(acts, info, false);
+       reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
+                                       ufid_flags);
        if (IS_ERR(reply)) {
                error = PTR_ERR(reply);
                goto err_kfree_acts;
@@ -905,8 +936,12 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                error = -ENODEV;
                goto err_unlock_ovs;
        }
+
        /* Check if this is a duplicate flow */
-       flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
+       if (ovs_identifier_is_ufid(&new_flow->id))
+               flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
+       if (!flow)
+               flow = ovs_flow_tbl_lookup(&dp->table, &key);
        if (likely(!flow)) {
                rcu_assign_pointer(new_flow->sf_acts, acts);
 
@@ -922,7 +957,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
-                                                      OVS_FLOW_CMD_NEW);
+                                                      OVS_FLOW_CMD_NEW,
+                                                      ufid_flags);
                        BUG_ON(error < 0);
                }
                ovs_unlock();
@@ -940,10 +976,15 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                        error = -EEXIST;
                        goto err_unlock_ovs;
                }
-               /* The unmasked key has to be the same for flow updates. */
-               if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-                       /* Look for any overlapping flow. */
-                       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+               /* The flow identifier has to be the same for flow updates.
+                * Look for any overlapping flow.
+                */
+               if (unlikely(!ovs_flow_cmp(flow, &match))) {
+                       if (ovs_identifier_is_key(&flow->id))
+                               flow = ovs_flow_tbl_lookup_exact(&dp->table,
+                                                                &match);
+                       else /* UFID matches but key is different */
+                               flow = NULL;
                        if (!flow) {
                                error = -ENOENT;
                                goto err_unlock_ovs;
@@ -958,7 +999,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
-                                                      OVS_FLOW_CMD_NEW);
+                                                      OVS_FLOW_CMD_NEW,
+                                                      ufid_flags);
                        BUG_ON(error < 0);
                }
                ovs_unlock();
@@ -1014,8 +1056,11 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        struct sw_flow_actions *old_acts = NULL, *acts = NULL;
        struct sw_flow_match match;
+       struct sw_flow_id sfid;
+       u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int error;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
+       bool ufid_present;
 
        /* Extract key. */
        error = -EINVAL;
@@ -1024,6 +1069,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                goto error;
        }
 
+       ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
        ovs_match_init(&match, &key, &mask);
        error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
                                  a[OVS_FLOW_ATTR_MASK], log);
@@ -1040,7 +1086,8 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                }
 
                /* Can allocate before locking if have acts. */
-               reply = ovs_flow_cmd_alloc_info(acts, info, false);
+               reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
+                                               ufid_flags);
                if (IS_ERR(reply)) {
                        error = PTR_ERR(reply);
                        goto err_kfree_acts;
@@ -1054,7 +1101,10 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock_ovs;
        }
        /* Check that the flow exists. */
-       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (ufid_present)
+               flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
+       else
+               flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                error = -ENOENT;
                goto err_unlock_ovs;
@@ -1070,13 +1120,16 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
-                                                      OVS_FLOW_CMD_NEW);
+                                                      OVS_FLOW_CMD_NEW,
+                                                      ufid_flags);
                        BUG_ON(error < 0);
                }
        } else {
                /* Could not alloc without acts before locking. */
                reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
-                                               info, OVS_FLOW_CMD_NEW, false);
+                                               info, OVS_FLOW_CMD_NEW, false,
+                                               ufid_flags);
+
                if (unlikely(IS_ERR(reply))) {
                        error = PTR_ERR(reply);
                        goto err_unlock_ovs;
@@ -1113,17 +1166,22 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
        struct sw_flow *flow;
        struct datapath *dp;
        struct sw_flow_match match;
-       int err;
+       struct sw_flow_id ufid;
+       u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
+       int err = 0;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
+       bool ufid_present;
 
-       if (!a[OVS_FLOW_ATTR_KEY]) {
+       ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
+       if (a[OVS_FLOW_ATTR_KEY]) {
+               ovs_match_init(&match, &key, NULL);
+               err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
+                                       log);
+       } else if (!ufid_present) {
                OVS_NLERR(log,
                          "Flow get message rejected, Key attribute missing.");
-               return -EINVAL;
+               err = -EINVAL;
        }
-
-       ovs_match_init(&match, &key, NULL);
-       err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, log);
        if (err)
                return err;
 
@@ -1134,14 +1192,17 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (ufid_present)
+               flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
+       else
+               flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (!flow) {
                err = -ENOENT;
                goto unlock;
        }
 
        reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
-                                       OVS_FLOW_CMD_NEW, true);
+                                       OVS_FLOW_CMD_NEW, true, ufid_flags);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
@@ -1160,13 +1221,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
-       struct sw_flow *flow;
+       struct sw_flow *flow = NULL;
        struct datapath *dp;
        struct sw_flow_match match;
+       struct sw_flow_id ufid;
+       u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int err;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
+       bool ufid_present;
 
-       if (likely(a[OVS_FLOW_ATTR_KEY])) {
+       ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
+       if (a[OVS_FLOW_ATTR_KEY]) {
                ovs_match_init(&match, &key, NULL);
                err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
                                        log);
@@ -1181,12 +1246,15 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
+       if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
                err = ovs_flow_tbl_flush(&dp->table);
                goto unlock;
        }
 
-       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (ufid_present)
+               flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
+       else
+               flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                err = -ENOENT;
                goto unlock;
@@ -1196,14 +1264,15 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
        ovs_unlock();
 
        reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
-                                       info, false);
+                                       &flow->id, info, false, ufid_flags);
        if (likely(reply)) {
                if (likely(!IS_ERR(reply))) {
                        rcu_read_lock();        /*To keep RCU checker happy. */
                        err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
                                                     reply, info->snd_portid,
                                                     info->snd_seq, 0,
-                                                    OVS_FLOW_CMD_DEL);
+                                                    OVS_FLOW_CMD_DEL,
+                                                    ufid_flags);
                        rcu_read_unlock();
                        BUG_ON(err < 0);
 
@@ -1222,9 +1291,18 @@ unlock:
 
 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct nlattr *a[__OVS_FLOW_ATTR_MAX];
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct table_instance *ti;
        struct datapath *dp;
+       u32 ufid_flags;
+       int err;
+
+       err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
+                           OVS_FLOW_ATTR_MAX, flow_policy);
+       if (err)
+               return err;
+       ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
 
        rcu_read_lock();
        dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1247,7 +1325,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                          OVS_FLOW_CMD_NEW) < 0)
+                                          OVS_FLOW_CMD_NEW, ufid_flags) < 0)
                        break;
 
                cb->args[0] = bucket;
@@ -1263,6 +1341,8 @@ static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
        [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
+       [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
+       [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
 };
 
 static const struct genl_ops dp_flow_genl_ops[] = {
@@ -1348,7 +1428,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
        if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
                goto nla_put_failure;
 
-       return genlmsg_end(skb, ovs_header);
+       genlmsg_end(skb, ovs_header);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(skb, ovs_header);
@@ -1722,7 +1803,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
        if (err == -EMSGSIZE)
                goto error;
 
-       return genlmsg_end(skb, ovs_header);
+       genlmsg_end(skb, ovs_header);
+       return 0;
 
 nla_put_failure:
        err = -EMSGSIZE;
index 70bef2ab7f2bc6017081ad780a8c5cb1aa9feb67..e2c348b8bacafac9c646f4861b55b76d4e1a51e9 100644 (file)
@@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 {
        struct flow_stats *stats;
        int node = numa_node_id();
+       int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        stats = rcu_dereference(flow->stats[node]);
 
@@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
                                if (likely(new_stats)) {
                                        new_stats->used = jiffies;
                                        new_stats->packet_count = 1;
-                                       new_stats->byte_count = skb->len;
+                                       new_stats->byte_count = len;
                                        new_stats->tcp_flags = tcp_flags;
                                        spin_lock_init(&new_stats->lock);
 
@@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 
        stats->used = jiffies;
        stats->packet_count++;
-       stats->byte_count += skb->len;
+       stats->byte_count += len;
        stats->tcp_flags |= tcp_flags;
 unlock:
        spin_unlock(&stats->lock);
@@ -471,7 +472,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
         */
 
        key->eth.tci = 0;
-       if (vlan_tx_tag_present(skb))
+       if (skb_vlan_tag_present(skb))
                key->eth.tci = htons(skb->vlan_tci);
        else if (eth->h_proto == htons(ETH_P_8021Q))
                if (unlikely(parse_vlan(skb, key)))
@@ -690,7 +691,7 @@ int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
                        BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
                                                   8)) - 1
                                        > sizeof(key->tun_opts));
-                       memcpy(GENEVE_OPTS(key, tun_info->options_len),
+                       memcpy(TUN_METADATA_OPTS(key, tun_info->options_len),
                               tun_info->options, tun_info->options_len);
                        key->tun_opts_len = tun_info->options_len;
                } else {
index a8b30f334388e44aea96fecf077801e1deec4c0e..a076e445ccc2e267f2664ddd6e81badc98a2636b 100644 (file)
@@ -53,7 +53,7 @@ struct ovs_key_ipv4_tunnel {
 
 struct ovs_tunnel_info {
        struct ovs_key_ipv4_tunnel tunnel;
-       const struct geneve_opt *options;
+       const void *options;
        u8 options_len;
 };
 
@@ -61,10 +61,10 @@ struct ovs_tunnel_info {
  * maximum size. This allows us to get the benefits of variable length
  * matching for small options.
  */
-#define GENEVE_OPTS(flow_key, opt_len) \
-       ((struct geneve_opt *)((flow_key)->tun_opts + \
-                              FIELD_SIZEOF(struct sw_flow_key, tun_opts) - \
-                              opt_len))
+#define TUN_METADATA_OFFSET(opt_len) \
+       (FIELD_SIZEOF(struct sw_flow_key, tun_opts) - opt_len)
+#define TUN_METADATA_OPTS(flow_key, opt_len) \
+       ((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
 
 static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
                                            __be32 saddr, __be32 daddr,
@@ -73,7 +73,7 @@ static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
                                            __be16 tp_dst,
                                            __be64 tun_id,
                                            __be16 tun_flags,
-                                           const struct geneve_opt *opts,
+                                           const void *opts,
                                            u8 opts_len)
 {
        tun_info->tunnel.tun_id = tun_id;
@@ -105,7 +105,7 @@ static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
                                          __be16 tp_dst,
                                          __be64 tun_id,
                                          __be16 tun_flags,
-                                         const struct geneve_opt *opts,
+                                         const void *opts,
                                          u8 opts_len)
 {
        __ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
@@ -197,6 +197,16 @@ struct sw_flow_match {
        struct sw_flow_mask *mask;
 };
 
+#define MAX_UFID_LENGTH 16 /* 128 bits */
+
+struct sw_flow_id {
+       u32 ufid_len;
+       union {
+               u32 ufid[MAX_UFID_LENGTH / 4];
+               struct sw_flow_key *unmasked_key;
+       };
+};
+
 struct sw_flow_actions {
        struct rcu_head rcu;
        u32 actions_len;
@@ -213,13 +223,15 @@ struct flow_stats {
 
 struct sw_flow {
        struct rcu_head rcu;
-       struct hlist_node hash_node[2];
-       u32 hash;
+       struct {
+               struct hlist_node node[2];
+               u32 hash;
+       } flow_table, ufid_table;
        int stats_last_writer;          /* NUMA-node id of the last writer on
                                         * 'stats[0]'.
                                         */
        struct sw_flow_key key;
-       struct sw_flow_key unmasked_key;
+       struct sw_flow_id id;
        struct sw_flow_mask *mask;
        struct sw_flow_actions __rcu *sf_acts;
        struct flow_stats __rcu *stats[]; /* One for each NUMA node.  First one
@@ -243,6 +255,16 @@ struct arp_eth_header {
        unsigned char       ar_tip[4];          /* target IP address        */
 } __packed;
 
+static inline bool ovs_identifier_is_ufid(const struct sw_flow_id *sfid)
+{
+       return sfid->ufid_len;
+}
+
+static inline bool ovs_identifier_is_key(const struct sw_flow_id *sfid)
+{
+       return !ovs_identifier_is_ufid(sfid);
+}
+
 void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
                           const struct sk_buff *);
 void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
index d1eecf707613eb5cb0c68531f63cf46ff6f82c63..993281e6278dc829cfbcb43bca054bf1c8adb8ec 100644 (file)
 #include <net/mpls.h>
 
 #include "flow_netlink.h"
+#include "vport-vxlan.h"
+
+struct ovs_len_tbl {
+       int len;
+       const struct ovs_len_tbl *next;
+};
+
+#define OVS_ATTR_NESTED -1
 
 static void update_range(struct sw_flow_match *match,
                         size_t offset, size_t size, bool is_mask)
@@ -261,6 +269,9 @@ size_t ovs_tun_key_attr_size(void)
                + nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_CSUM */
                + nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_OAM */
                + nla_total_size(256)  /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
+               /* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
+                * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
+                */
                + nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
                + nla_total_size(2);   /* OVS_TUNNEL_KEY_ATTR_TP_DST */
 }
@@ -289,29 +300,45 @@ size_t ovs_key_attr_size(void)
                + nla_total_size(28); /* OVS_KEY_ATTR_ND */
 }
 
+static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
+       [OVS_TUNNEL_KEY_ATTR_ID]            = { .len = sizeof(u64) },
+       [OVS_TUNNEL_KEY_ATTR_IPV4_SRC]      = { .len = sizeof(u32) },
+       [OVS_TUNNEL_KEY_ATTR_IPV4_DST]      = { .len = sizeof(u32) },
+       [OVS_TUNNEL_KEY_ATTR_TOS]           = { .len = 1 },
+       [OVS_TUNNEL_KEY_ATTR_TTL]           = { .len = 1 },
+       [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
+       [OVS_TUNNEL_KEY_ATTR_CSUM]          = { .len = 0 },
+       [OVS_TUNNEL_KEY_ATTR_TP_SRC]        = { .len = sizeof(u16) },
+       [OVS_TUNNEL_KEY_ATTR_TP_DST]        = { .len = sizeof(u16) },
+       [OVS_TUNNEL_KEY_ATTR_OAM]           = { .len = 0 },
+       [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_NESTED },
+       [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED },
+};
+
 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
-static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
-       [OVS_KEY_ATTR_ENCAP] = -1,
-       [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
-       [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
-       [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
-       [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
-       [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
-       [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
-       [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
-       [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
-       [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
-       [OVS_KEY_ATTR_TCP_FLAGS] = sizeof(__be16),
-       [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
-       [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
-       [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
-       [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
-       [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
-       [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
-       [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32),
-       [OVS_KEY_ATTR_DP_HASH] = sizeof(u32),
-       [OVS_KEY_ATTR_TUNNEL] = -1,
-       [OVS_KEY_ATTR_MPLS] = sizeof(struct ovs_key_mpls),
+static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+       [OVS_KEY_ATTR_ENCAP]     = { .len = OVS_ATTR_NESTED },
+       [OVS_KEY_ATTR_PRIORITY]  = { .len = sizeof(u32) },
+       [OVS_KEY_ATTR_IN_PORT]   = { .len = sizeof(u32) },
+       [OVS_KEY_ATTR_SKB_MARK]  = { .len = sizeof(u32) },
+       [OVS_KEY_ATTR_ETHERNET]  = { .len = sizeof(struct ovs_key_ethernet) },
+       [OVS_KEY_ATTR_VLAN]      = { .len = sizeof(__be16) },
+       [OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
+       [OVS_KEY_ATTR_IPV4]      = { .len = sizeof(struct ovs_key_ipv4) },
+       [OVS_KEY_ATTR_IPV6]      = { .len = sizeof(struct ovs_key_ipv6) },
+       [OVS_KEY_ATTR_TCP]       = { .len = sizeof(struct ovs_key_tcp) },
+       [OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
+       [OVS_KEY_ATTR_UDP]       = { .len = sizeof(struct ovs_key_udp) },
+       [OVS_KEY_ATTR_SCTP]      = { .len = sizeof(struct ovs_key_sctp) },
+       [OVS_KEY_ATTR_ICMP]      = { .len = sizeof(struct ovs_key_icmp) },
+       [OVS_KEY_ATTR_ICMPV6]    = { .len = sizeof(struct ovs_key_icmpv6) },
+       [OVS_KEY_ATTR_ARP]       = { .len = sizeof(struct ovs_key_arp) },
+       [OVS_KEY_ATTR_ND]        = { .len = sizeof(struct ovs_key_nd) },
+       [OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
+       [OVS_KEY_ATTR_DP_HASH]   = { .len = sizeof(u32) },
+       [OVS_KEY_ATTR_TUNNEL]    = { .len = OVS_ATTR_NESTED,
+                                    .next = ovs_tunnel_key_lens, },
+       [OVS_KEY_ATTR_MPLS]      = { .len = sizeof(struct ovs_key_mpls) },
 };
 
 static bool is_all_zero(const u8 *fp, size_t size)
@@ -352,8 +379,8 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
                        return -EINVAL;
                }
 
-               expected_len = ovs_key_lens[type];
-               if (nla_len(nla) != expected_len && expected_len != -1) {
+               expected_len = ovs_key_lens[type].len;
+               if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) {
                        OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
                                  type, nla_len(nla), expected_len);
                        return -EINVAL;
@@ -432,13 +459,47 @@ static int genev_tun_opt_from_nlattr(const struct nlattr *a,
                SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
        }
 
-       opt_key_offset = (unsigned long)GENEVE_OPTS((struct sw_flow_key *)0,
-                                                   nla_len(a));
+       opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
        SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
                                  nla_len(a), is_mask);
        return 0;
 }
 
+static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = {
+       [OVS_VXLAN_EXT_GBP]     = { .type = NLA_U32 },
+};
+
+static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
+                                    struct sw_flow_match *match, bool is_mask,
+                                    bool log)
+{
+       struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
+       unsigned long opt_key_offset;
+       struct ovs_vxlan_opts opts;
+       int err;
+
+       BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
+
+       err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy);
+       if (err < 0)
+               return err;
+
+       memset(&opts, 0, sizeof(opts));
+
+       if (tb[OVS_VXLAN_EXT_GBP])
+               opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]);
+
+       if (!is_mask)
+               SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
+       else
+               SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
+
+       opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
+       SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
+                                 is_mask);
+       return 0;
+}
+
 static int ipv4_tun_from_nlattr(const struct nlattr *attr,
                                struct sw_flow_match *match, bool is_mask,
                                bool log)
@@ -447,35 +508,22 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
        int rem;
        bool ttl = false;
        __be16 tun_flags = 0;
+       int opts_type = 0;
 
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                int err;
 
-               static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
-                       [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
-                       [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
-                       [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
-                       [OVS_TUNNEL_KEY_ATTR_TOS] = 1,
-                       [OVS_TUNNEL_KEY_ATTR_TTL] = 1,
-                       [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
-                       [OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
-                       [OVS_TUNNEL_KEY_ATTR_TP_SRC] = sizeof(u16),
-                       [OVS_TUNNEL_KEY_ATTR_TP_DST] = sizeof(u16),
-                       [OVS_TUNNEL_KEY_ATTR_OAM] = 0,
-                       [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = -1,
-               };
-
                if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
                        OVS_NLERR(log, "Tunnel attr %d out of range max %d",
                                  type, OVS_TUNNEL_KEY_ATTR_MAX);
                        return -EINVAL;
                }
 
-               if (ovs_tunnel_key_lens[type] != nla_len(a) &&
-                   ovs_tunnel_key_lens[type] != -1) {
+               if (ovs_tunnel_key_lens[type].len != nla_len(a) &&
+                   ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) {
                        OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
-                                 type, nla_len(a), ovs_tunnel_key_lens[type]);
+                                 type, nla_len(a), ovs_tunnel_key_lens[type].len);
                        return -EINVAL;
                }
 
@@ -520,11 +568,30 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
                        tun_flags |= TUNNEL_OAM;
                        break;
                case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
+                       if (opts_type) {
+                               OVS_NLERR(log, "Multiple metadata blocks provided");
+                               return -EINVAL;
+                       }
+
                        err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
                        if (err)
                                return err;
 
-                       tun_flags |= TUNNEL_OPTIONS_PRESENT;
+                       tun_flags |= TUNNEL_GENEVE_OPT;
+                       opts_type = type;
+                       break;
+               case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
+                       if (opts_type) {
+                               OVS_NLERR(log, "Multiple metadata blocks provided");
+                               return -EINVAL;
+                       }
+
+                       err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
+                       if (err)
+                               return err;
+
+                       tun_flags |= TUNNEL_VXLAN_OPT;
+                       opts_type = type;
                        break;
                default:
                        OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d",
@@ -553,13 +620,29 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
                }
        }
 
+       return opts_type;
+}
+
+static int vxlan_opt_to_nlattr(struct sk_buff *skb,
+                              const void *tun_opts, int swkey_tun_opts_len)
+{
+       const struct ovs_vxlan_opts *opts = tun_opts;
+       struct nlattr *nla;
+
+       nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
+       if (!nla)
+               return -EMSGSIZE;
+
+       if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
+               return -EMSGSIZE;
+
+       nla_nest_end(skb, nla);
        return 0;
 }
 
 static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
                                const struct ovs_key_ipv4_tunnel *output,
-                               const struct geneve_opt *tun_opts,
-                               int swkey_tun_opts_len)
+                               const void *tun_opts, int swkey_tun_opts_len)
 {
        if (output->tun_flags & TUNNEL_KEY &&
            nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
@@ -590,18 +673,22 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
        if ((output->tun_flags & TUNNEL_OAM) &&
            nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
                return -EMSGSIZE;
-       if (tun_opts &&
-           nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
-                   swkey_tun_opts_len, tun_opts))
-               return -EMSGSIZE;
+       if (tun_opts) {
+               if (output->tun_flags & TUNNEL_GENEVE_OPT &&
+                   nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
+                           swkey_tun_opts_len, tun_opts))
+                       return -EMSGSIZE;
+               else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
+                        vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
+                       return -EMSGSIZE;
+       }
 
        return 0;
 }
 
 static int ipv4_tun_to_nlattr(struct sk_buff *skb,
                              const struct ovs_key_ipv4_tunnel *output,
-                             const struct geneve_opt *tun_opts,
-                             int swkey_tun_opts_len)
+                             const void *tun_opts, int swkey_tun_opts_len)
 {
        struct nlattr *nla;
        int err;
@@ -675,7 +762,7 @@ static int metadata_from_nlattrs(struct sw_flow_match *match,  u64 *attrs,
        }
        if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
                if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
-                                        is_mask, log))
+                                        is_mask, log) < 0)
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
        }
@@ -915,18 +1002,16 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
        return 0;
 }
 
-static void nlattr_set(struct nlattr *attr, u8 val, bool is_attr_mask_key)
+static void nlattr_set(struct nlattr *attr, u8 val,
+                      const struct ovs_len_tbl *tbl)
 {
        struct nlattr *nla;
        int rem;
 
        /* The nlattr stream should already have been validated */
        nla_for_each_nested(nla, attr, rem) {
-               /* We assume that ovs_key_lens[type] == -1 means that type is a
-                * nested attribute
-                */
-               if (is_attr_mask_key && ovs_key_lens[nla_type(nla)] == -1)
-                       nlattr_set(nla, val, false);
+               if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
+                       nlattr_set(nla, val, tbl[nla_type(nla)].next);
                else
                        memset(nla_data(nla), val, nla_len(nla));
        }
@@ -934,7 +1019,7 @@ static void nlattr_set(struct nlattr *attr, u8 val, bool is_attr_mask_key)
 
 static void mask_set_nlattr(struct nlattr *attr, u8 val)
 {
-       nlattr_set(attr, val, true);
+       nlattr_set(attr, val, ovs_key_lens);
 }
 
 /**
@@ -1095,6 +1180,59 @@ free_newmask:
        return err;
 }
 
+static size_t get_ufid_len(const struct nlattr *attr, bool log)
+{
+       size_t len;
+
+       if (!attr)
+               return 0;
+
+       len = nla_len(attr);
+       if (len < 1 || len > MAX_UFID_LENGTH) {
+               OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
+                         nla_len(attr), MAX_UFID_LENGTH);
+               return 0;
+       }
+
+       return len;
+}
+
+/* Initializes 'flow->ufid', returning true if 'attr' contains a valid UFID,
+ * or false otherwise.
+ */
+bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
+                     bool log)
+{
+       sfid->ufid_len = get_ufid_len(attr, log);
+       if (sfid->ufid_len)
+               memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);
+
+       return sfid->ufid_len;
+}
+
+int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
+                          const struct sw_flow_key *key, bool log)
+{
+       struct sw_flow_key *new_key;
+
+       if (ovs_nla_get_ufid(sfid, ufid, log))
+               return 0;
+
+       /* If UFID was not provided, use unmasked key. */
+       new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
+       if (!new_key)
+               return -ENOMEM;
+       memcpy(new_key, key, sizeof(*key));
+       sfid->unmasked_key = new_key;
+
+       return 0;
+}
+
+u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
+{
+       return attr ? nla_get_u32(attr) : 0;
+}
+
 /**
  * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
  * @key: Receives extracted in_port, priority, tun_key and skb_mark.
@@ -1131,12 +1269,12 @@ int ovs_nla_get_flow_metadata(const struct nlattr *attr,
        return metadata_from_nlattrs(&match, &attrs, a, false, log);
 }
 
-int ovs_nla_put_flow(const struct sw_flow_key *swkey,
-                    const struct sw_flow_key *output, struct sk_buff *skb)
+static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
+                            const struct sw_flow_key *output, bool is_mask,
+                            struct sk_buff *skb)
 {
        struct ovs_key_ethernet *eth_key;
        struct nlattr *nla, *encap;
-       bool is_mask = (swkey != output);
 
        if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
                goto nla_put_failure;
@@ -1148,10 +1286,10 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                goto nla_put_failure;
 
        if ((swkey->tun_key.ipv4_dst || is_mask)) {
-               const struct geneve_opt *opts = NULL;
+               const void *opts = NULL;
 
                if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
-                       opts = GENEVE_OPTS(output, swkey->tun_opts_len);
+                       opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
 
                if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
                                       swkey->tun_opts_len))
@@ -1346,6 +1484,49 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+int ovs_nla_put_key(const struct sw_flow_key *swkey,
+                   const struct sw_flow_key *output, int attr, bool is_mask,
+                   struct sk_buff *skb)
+{
+       int err;
+       struct nlattr *nla;
+
+       nla = nla_nest_start(skb, attr);
+       if (!nla)
+               return -EMSGSIZE;
+       err = __ovs_nla_put_key(swkey, output, is_mask, skb);
+       if (err)
+               return err;
+       nla_nest_end(skb, nla);
+
+       return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
+{
+       if (ovs_identifier_is_ufid(&flow->id))
+               return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
+                              flow->id.ufid);
+
+       return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
+                              OVS_FLOW_ATTR_KEY, false, skb);
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
+{
+       return ovs_nla_put_key(&flow->mask->key, &flow->key,
+                               OVS_FLOW_ATTR_KEY, false, skb);
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
+{
+       return ovs_nla_put_key(&flow->key, &flow->mask->key,
+                               OVS_FLOW_ATTR_MASK, true, skb);
+}
+
 #define MAX_ACTIONS_BUFSIZE    (32 * 1024)
 
 static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
@@ -1514,16 +1695,6 @@ static int validate_and_copy_sample(const struct nlattr *attr,
        return 0;
 }
 
-static int validate_tp_port(const struct sw_flow_key *flow_key,
-                           __be16 eth_type)
-{
-       if ((eth_type == htons(ETH_P_IP) || eth_type == htons(ETH_P_IPV6)) &&
-           (flow_key->tp.src || flow_key->tp.dst))
-               return 0;
-
-       return -EINVAL;
-}
-
 void ovs_match_init(struct sw_flow_match *match,
                    struct sw_flow_key *key,
                    struct sw_flow_mask *mask)
@@ -1540,6 +1711,34 @@ void ovs_match_init(struct sw_flow_match *match,
        }
 }
 
+static int validate_geneve_opts(struct sw_flow_key *key)
+{
+       struct geneve_opt *option;
+       int opts_len = key->tun_opts_len;
+       bool crit_opt = false;
+
+       option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
+       while (opts_len > 0) {
+               int len;
+
+               if (opts_len < sizeof(*option))
+                       return -EINVAL;
+
+               len = sizeof(*option) + option->length * 4;
+               if (len > opts_len)
+                       return -EINVAL;
+
+               crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
+
+               option = (struct geneve_opt *)((u8 *)option + len);
+               opts_len -= len;
+       };
+
+       key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
+
+       return 0;
+}
+
 static int validate_and_copy_set_tun(const struct nlattr *attr,
                                     struct sw_flow_actions **sfa, bool log)
 {
@@ -1547,36 +1746,23 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
        struct sw_flow_key key;
        struct ovs_tunnel_info *tun_info;
        struct nlattr *a;
-       int err, start;
+       int err, start, opts_type;
 
        ovs_match_init(&match, &key, NULL);
-       err = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
-       if (err)
-               return err;
+       opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
+       if (opts_type < 0)
+               return opts_type;
 
        if (key.tun_opts_len) {
-               struct geneve_opt *option = GENEVE_OPTS(&key,
-                                                       key.tun_opts_len);
-               int opts_len = key.tun_opts_len;
-               bool crit_opt = false;
-
-               while (opts_len > 0) {
-                       int len;
-
-                       if (opts_len < sizeof(*option))
-                               return -EINVAL;
-
-                       len = sizeof(*option) + option->length * 4;
-                       if (len > opts_len)
-                               return -EINVAL;
-
-                       crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
-
-                       option = (struct geneve_opt *)((u8 *)option + len);
-                       opts_len -= len;
-               };
-
-               key.tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
+               switch (opts_type) {
+               case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
+                       err = validate_geneve_opts(&key);
+                       if (err < 0)
+                               return err;
+                       break;
+               case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
+                       break;
+               }
        };
 
        start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
@@ -1597,9 +1783,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
                 * everything else will go away after flow setup. We can append
                 * it to tun_info and then point there.
                 */
-               memcpy((tun_info + 1), GENEVE_OPTS(&key, key.tun_opts_len),
-                      key.tun_opts_len);
-               tun_info->options = (struct geneve_opt *)(tun_info + 1);
+               memcpy((tun_info + 1),
+                      TUN_METADATA_OPTS(&key, key.tun_opts_len), key.tun_opts_len);
+               tun_info->options = (tun_info + 1);
        } else {
                tun_info->options = NULL;
        }
@@ -1609,21 +1795,43 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
        return err;
 }
 
+/* Return false if there are any non-masked bits set.
+ * Mask follows data immediately, before any netlink padding.
+ */
+static bool validate_masked(u8 *data, int len)
+{
+       u8 *mask = data + len;
+
+       while (len--)
+               if (*data++ & ~*mask++)
+                       return false;
+
+       return true;
+}
+
 static int validate_set(const struct nlattr *a,
                        const struct sw_flow_key *flow_key,
                        struct sw_flow_actions **sfa,
-                       bool *set_tun, __be16 eth_type, bool log)
+                       bool *skip_copy, __be16 eth_type, bool masked, bool log)
 {
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);
+       size_t key_len;
 
        /* There can be only one key in a action */
        if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
                return -EINVAL;
 
+       key_len = nla_len(ovs_key);
+       if (masked)
+               key_len /= 2;
+
        if (key_type > OVS_KEY_ATTR_MAX ||
-           (ovs_key_lens[key_type] != nla_len(ovs_key) &&
-            ovs_key_lens[key_type] != -1))
+           (ovs_key_lens[key_type].len != key_len &&
+            ovs_key_lens[key_type].len != OVS_ATTR_NESTED))
+               return -EINVAL;
+
+       if (masked && !validate_masked(nla_data(ovs_key), key_len))
                return -EINVAL;
 
        switch (key_type) {
@@ -1640,7 +1848,10 @@ static int validate_set(const struct nlattr *a,
                if (eth_p_mpls(eth_type))
                        return -EINVAL;
 
-               *set_tun = true;
+               if (masked)
+                       return -EINVAL; /* Masked tunnel set not supported. */
+
+               *skip_copy = true;
                err = validate_and_copy_set_tun(a, sfa, log);
                if (err)
                        return err;
@@ -1650,48 +1861,66 @@ static int validate_set(const struct nlattr *a,
                if (eth_type != htons(ETH_P_IP))
                        return -EINVAL;
 
-               if (!flow_key->ip.proto)
-                       return -EINVAL;
-
                ipv4_key = nla_data(ovs_key);
-               if (ipv4_key->ipv4_proto != flow_key->ip.proto)
-                       return -EINVAL;
 
-               if (ipv4_key->ipv4_frag != flow_key->ip.frag)
-                       return -EINVAL;
+               if (masked) {
+                       const struct ovs_key_ipv4 *mask = ipv4_key + 1;
+
+                       /* Non-writeable fields. */
+                       if (mask->ipv4_proto || mask->ipv4_frag)
+                               return -EINVAL;
+               } else {
+                       if (ipv4_key->ipv4_proto != flow_key->ip.proto)
+                               return -EINVAL;
 
+                       if (ipv4_key->ipv4_frag != flow_key->ip.frag)
+                               return -EINVAL;
+               }
                break;
 
        case OVS_KEY_ATTR_IPV6:
                if (eth_type != htons(ETH_P_IPV6))
                        return -EINVAL;
 
-               if (!flow_key->ip.proto)
-                       return -EINVAL;
-
                ipv6_key = nla_data(ovs_key);
-               if (ipv6_key->ipv6_proto != flow_key->ip.proto)
-                       return -EINVAL;
 
-               if (ipv6_key->ipv6_frag != flow_key->ip.frag)
-                       return -EINVAL;
+               if (masked) {
+                       const struct ovs_key_ipv6 *mask = ipv6_key + 1;
 
+                       /* Non-writeable fields. */
+                       if (mask->ipv6_proto || mask->ipv6_frag)
+                               return -EINVAL;
+
+                       /* Invalid bits in the flow label mask? */
+                       if (ntohl(mask->ipv6_label) & 0xFFF00000)
+                               return -EINVAL;
+               } else {
+                       if (ipv6_key->ipv6_proto != flow_key->ip.proto)
+                               return -EINVAL;
+
+                       if (ipv6_key->ipv6_frag != flow_key->ip.frag)
+                               return -EINVAL;
+               }
                if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
                        return -EINVAL;
 
                break;
 
        case OVS_KEY_ATTR_TCP:
-               if (flow_key->ip.proto != IPPROTO_TCP)
+               if ((eth_type != htons(ETH_P_IP) &&
+                    eth_type != htons(ETH_P_IPV6)) ||
+                   flow_key->ip.proto != IPPROTO_TCP)
                        return -EINVAL;
 
-               return validate_tp_port(flow_key, eth_type);
+               break;
 
        case OVS_KEY_ATTR_UDP:
-               if (flow_key->ip.proto != IPPROTO_UDP)
+               if ((eth_type != htons(ETH_P_IP) &&
+                    eth_type != htons(ETH_P_IPV6)) ||
+                   flow_key->ip.proto != IPPROTO_UDP)
                        return -EINVAL;
 
-               return validate_tp_port(flow_key, eth_type);
+               break;
 
        case OVS_KEY_ATTR_MPLS:
                if (!eth_p_mpls(eth_type))
@@ -1699,15 +1928,45 @@ static int validate_set(const struct nlattr *a,
                break;
 
        case OVS_KEY_ATTR_SCTP:
-               if (flow_key->ip.proto != IPPROTO_SCTP)
+               if ((eth_type != htons(ETH_P_IP) &&
+                    eth_type != htons(ETH_P_IPV6)) ||
+                   flow_key->ip.proto != IPPROTO_SCTP)
                        return -EINVAL;
 
-               return validate_tp_port(flow_key, eth_type);
+               break;
 
        default:
                return -EINVAL;
        }
 
+       /* Convert non-masked non-tunnel set actions to masked set actions. */
+       if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
+               int start, len = key_len * 2;
+               struct nlattr *at;
+
+               *skip_copy = true;
+
+               start = add_nested_action_start(sfa,
+                                               OVS_ACTION_ATTR_SET_TO_MASKED,
+                                               log);
+               if (start < 0)
+                       return start;
+
+               at = __add_action(sfa, key_type, NULL, len, log);
+               if (IS_ERR(at))
+                       return PTR_ERR(at);
+
+               memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
+               memset(nla_data(at) + key_len, 0xff, key_len);    /* Mask. */
+               /* Clear non-writeable bits from otherwise writeable fields. */
+               if (key_type == OVS_KEY_ATTR_IPV6) {
+                       struct ovs_key_ipv6 *mask = nla_data(at) + key_len;
+
+                       mask->ipv6_label &= htonl(0x000FFFFF);
+               }
+               add_nested_action_end(*sfa, start);
+       }
+
        return 0;
 }
 
@@ -1769,6 +2028,7 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
                        [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
                        [OVS_ACTION_ATTR_POP_VLAN] = 0,
                        [OVS_ACTION_ATTR_SET] = (u32)-1,
+                       [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
                        [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
                        [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
                };
@@ -1864,7 +2124,14 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 
                case OVS_ACTION_ATTR_SET:
                        err = validate_set(a, key, sfa,
-                                          &skip_copy, eth_type, log);
+                                          &skip_copy, eth_type, false, log);
+                       if (err)
+                               return err;
+                       break;
+
+               case OVS_ACTION_ATTR_SET_MASKED:
+                       err = validate_set(a, key, sfa,
+                                          &skip_copy, eth_type, true, log);
                        if (err)
                                return err;
                        break;
@@ -1894,6 +2161,7 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
        return 0;
 }
 
+/* 'key' must be the masked key. */
 int ovs_nla_copy_actions(const struct nlattr *attr,
                         const struct sw_flow_key *key,
                         struct sw_flow_actions **sfa, bool log)
@@ -1981,6 +2249,21 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
        return 0;
 }
 
+static int masked_set_action_to_set_action_attr(const struct nlattr *a,
+                                               struct sk_buff *skb)
+{
+       const struct nlattr *ovs_key = nla_data(a);
+       size_t key_len = nla_len(ovs_key) / 2;
+
+       /* Revert the conversion we did from a non-masked set action to
+        * masked set action.
+        */
+       if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
 {
        const struct nlattr *a;
@@ -1996,6 +2279,12 @@ int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
                                return err;
                        break;
 
+               case OVS_ACTION_ATTR_SET_TO_MASKED:
+                       err = masked_set_action_to_set_action_attr(a, skb);
+                       if (err)
+                               return err;
+                       break;
+
                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample_action_to_attr(a, skb);
                        if (err)
index 577f12be34592a5abbab4f142b5ab02ebda49179..5c3d75bff3104a1ba0ea1b916900514245e9af2f 100644 (file)
@@ -43,16 +43,25 @@ size_t ovs_key_attr_size(void);
 void ovs_match_init(struct sw_flow_match *match,
                    struct sw_flow_key *key, struct sw_flow_mask *mask);
 
-int ovs_nla_put_flow(const struct sw_flow_key *,
-                    const struct sw_flow_key *, struct sk_buff *);
+int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *,
+                   int attr, bool is_mask, struct sk_buff *);
 int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *,
                              bool log);
 
+int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb);
+int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb);
+int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
+
 int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key,
                      const struct nlattr *mask, bool log);
 int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
                                  const struct ovs_tunnel_info *);
 
+bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
+int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
+                          const struct sw_flow_key *key, bool log);
+u32 ovs_nla_get_ufid_flags(const struct nlattr *attr);
+
 int ovs_nla_copy_actions(const struct nlattr *attr,
                         const struct sw_flow_key *key,
                         struct sw_flow_actions **sfa, bool log);
index 5899bf161c617de67566f60ad86a23b8ee111d90..4613df8c82900e32a4e0188688c9e0354022484d 100644 (file)
@@ -85,6 +85,8 @@ struct sw_flow *ovs_flow_alloc(void)
 
        flow->sf_acts = NULL;
        flow->mask = NULL;
+       flow->id.unmasked_key = NULL;
+       flow->id.ufid_len = 0;
        flow->stats_last_writer = NUMA_NO_NODE;
 
        /* Initialize the default stat node. */
@@ -139,6 +141,8 @@ static void flow_free(struct sw_flow *flow)
 {
        int node;
 
+       if (ovs_identifier_is_key(&flow->id))
+               kfree(flow->id.unmasked_key);
        kfree((struct sw_flow_actions __force *)flow->sf_acts);
        for_each_node(node)
                if (flow->stats[node])
@@ -200,18 +204,28 @@ static struct table_instance *table_instance_alloc(int new_size)
 
 int ovs_flow_tbl_init(struct flow_table *table)
 {
-       struct table_instance *ti;
+       struct table_instance *ti, *ufid_ti;
 
        ti = table_instance_alloc(TBL_MIN_BUCKETS);
 
        if (!ti)
                return -ENOMEM;
 
+       ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+       if (!ufid_ti)
+               goto free_ti;
+
        rcu_assign_pointer(table->ti, ti);
+       rcu_assign_pointer(table->ufid_ti, ufid_ti);
        INIT_LIST_HEAD(&table->mask_list);
        table->last_rehash = jiffies;
        table->count = 0;
+       table->ufid_count = 0;
        return 0;
+
+free_ti:
+       __table_instance_destroy(ti);
+       return -ENOMEM;
 }
 
 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
@@ -221,13 +235,16 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
        __table_instance_destroy(ti);
 }
 
-static void table_instance_destroy(struct table_instance *ti, bool deferred)
+static void table_instance_destroy(struct table_instance *ti,
+                                  struct table_instance *ufid_ti,
+                                  bool deferred)
 {
        int i;
 
        if (!ti)
                return;
 
+       BUG_ON(!ufid_ti);
        if (ti->keep_flows)
                goto skip_flows;
 
@@ -236,18 +253,24 @@ static void table_instance_destroy(struct table_instance *ti, bool deferred)
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;
+               int ufid_ver = ufid_ti->node_ver;
 
-               hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
-                       hlist_del_rcu(&flow->hash_node[ver]);
+               hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
+                       hlist_del_rcu(&flow->flow_table.node[ver]);
+                       if (ovs_identifier_is_ufid(&flow->id))
+                               hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
                        ovs_flow_free(flow, deferred);
                }
        }
 
 skip_flows:
-       if (deferred)
+       if (deferred) {
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
-       else
+               call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
+       } else {
                __table_instance_destroy(ti);
+               __table_instance_destroy(ufid_ti);
+       }
 }
 
 /* No need for locking this function is called from RCU callback or
@@ -256,8 +279,9 @@ skip_flows:
 void ovs_flow_tbl_destroy(struct flow_table *table)
 {
        struct table_instance *ti = rcu_dereference_raw(table->ti);
+       struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
 
-       table_instance_destroy(ti, false);
+       table_instance_destroy(ti, ufid_ti, false);
 }
 
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -272,7 +296,7 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = flex_array_get(ti->buckets, *bucket);
-               hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
+               hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
@@ -294,16 +318,26 @@ static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
                                (hash & (ti->n_buckets - 1)));
 }
 
-static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
+static void table_instance_insert(struct table_instance *ti,
+                                 struct sw_flow *flow)
+{
+       struct hlist_head *head;
+
+       head = find_bucket(ti, flow->flow_table.hash);
+       hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
+}
+
+static void ufid_table_instance_insert(struct table_instance *ti,
+                                      struct sw_flow *flow)
 {
        struct hlist_head *head;
 
-       head = find_bucket(ti, flow->hash);
-       hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
+       head = find_bucket(ti, flow->ufid_table.hash);
+       hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
 }
 
 static void flow_table_copy_flows(struct table_instance *old,
-                                 struct table_instance *new)
+                                 struct table_instance *new, bool ufid)
 {
        int old_ver;
        int i;
@@ -318,15 +352,21 @@ static void flow_table_copy_flows(struct table_instance *old,
 
                head = flex_array_get(old->buckets, i);
 
-               hlist_for_each_entry(flow, head, hash_node[old_ver])
-                       table_instance_insert(new, flow);
+               if (ufid)
+                       hlist_for_each_entry(flow, head,
+                                            ufid_table.node[old_ver])
+                               ufid_table_instance_insert(new, flow);
+               else
+                       hlist_for_each_entry(flow, head,
+                                            flow_table.node[old_ver])
+                               table_instance_insert(new, flow);
        }
 
        old->keep_flows = true;
 }
 
 static struct table_instance *table_instance_rehash(struct table_instance *ti,
-                                           int n_buckets)
+                                                   int n_buckets, bool ufid)
 {
        struct table_instance *new_ti;
 
@@ -334,32 +374,45 @@ static struct table_instance *table_instance_rehash(struct table_instance *ti,
        if (!new_ti)
                return NULL;
 
-       flow_table_copy_flows(ti, new_ti);
+       flow_table_copy_flows(ti, new_ti, ufid);
 
        return new_ti;
 }
 
 int ovs_flow_tbl_flush(struct flow_table *flow_table)
 {
-       struct table_instance *old_ti;
-       struct table_instance *new_ti;
+       struct table_instance *old_ti, *new_ti;
+       struct table_instance *old_ufid_ti, *new_ufid_ti;
 
-       old_ti = ovsl_dereference(flow_table->ti);
        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;
+       new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+       if (!new_ufid_ti)
+               goto err_free_ti;
+
+       old_ti = ovsl_dereference(flow_table->ti);
+       old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
 
        rcu_assign_pointer(flow_table->ti, new_ti);
+       rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;
+       flow_table->ufid_count = 0;
 
-       table_instance_destroy(old_ti, true);
+       table_instance_destroy(old_ti, old_ufid_ti, true);
        return 0;
+
+err_free_ti:
+       __table_instance_destroy(new_ti);
+       return -ENOMEM;
 }
 
-static u32 flow_hash(const struct sw_flow_key *key, int key_start,
-                    int key_end)
+static u32 flow_hash(const struct sw_flow_key *key,
+                    const struct sw_flow_key_range *range)
 {
+       int key_start = range->start;
+       int key_end = range->end;
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;
 
@@ -395,19 +448,20 @@ static bool cmp_key(const struct sw_flow_key *key1,
 
 static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
-                               int key_start, int key_end)
+                               const struct sw_flow_key_range *range)
 {
-       return cmp_key(&flow->key, key, key_start, key_end);
+       return cmp_key(&flow->key, key, range->start, range->end);
 }
 
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
-                              const struct sw_flow_match *match)
+static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+                                     const struct sw_flow_match *match)
 {
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;
 
-       return cmp_key(&flow->unmasked_key, key, key_start, key_end);
+       BUG_ON(ovs_identifier_is_ufid(&flow->id));
+       return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
 }
 
 static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
@@ -416,18 +470,15 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 {
        struct sw_flow *flow;
        struct hlist_head *head;
-       int key_start = mask->range.start;
-       int key_end = mask->range.end;
        u32 hash;
        struct sw_flow_key masked_key;
 
        ovs_flow_mask_key(&masked_key, unmasked, mask);
-       hash = flow_hash(&masked_key, key_start, key_end);
+       hash = flow_hash(&masked_key, &mask->range);
        head = find_bucket(ti, hash);
-       hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
-               if (flow->mask == mask && flow->hash == hash &&
-                   flow_cmp_masked_key(flow, &masked_key,
-                                         key_start, key_end))
+       hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
+               if (flow->mask == mask && flow->flow_table.hash == hash &&
+                   flow_cmp_masked_key(flow, &masked_key, &mask->range))
                        return flow;
        }
        return NULL;
@@ -469,7 +520,48 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
        /* Always called under ovs-mutex. */
        list_for_each_entry(mask, &tbl->mask_list, list) {
                flow = masked_flow_lookup(ti, match->key, mask);
-               if (flow && ovs_flow_cmp_unmasked_key(flow, match))  /* Found */
+               if (flow && ovs_identifier_is_key(&flow->id) &&
+                   ovs_flow_cmp_unmasked_key(flow, match))
+                       return flow;
+       }
+       return NULL;
+}
+
+static u32 ufid_hash(const struct sw_flow_id *sfid)
+{
+       return jhash(sfid->ufid, sfid->ufid_len, 0);
+}
+
+static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
+                             const struct sw_flow_id *sfid)
+{
+       if (flow->id.ufid_len != sfid->ufid_len)
+               return false;
+
+       return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
+}
+
+bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
+{
+       if (ovs_identifier_is_ufid(&flow->id))
+               return flow_cmp_masked_key(flow, match->key, &match->range);
+
+       return ovs_flow_cmp_unmasked_key(flow, match);
+}
+
+struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
+                                        const struct sw_flow_id *ufid)
+{
+       struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
+       struct sw_flow *flow;
+       struct hlist_head *head;
+       u32 hash;
+
+       hash = ufid_hash(ufid);
+       head = find_bucket(ti, hash);
+       hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
+               if (flow->ufid_table.hash == hash &&
+                   ovs_flow_cmp_ufid(flow, ufid))
                        return flow;
        }
        return NULL;
@@ -486,9 +578,10 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table)
        return num;
 }
 
-static struct table_instance *table_instance_expand(struct table_instance *ti)
+static struct table_instance *table_instance_expand(struct table_instance *ti,
+                                                   bool ufid)
 {
-       return table_instance_rehash(ti, ti->n_buckets * 2);
+       return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
 }
 
 /* Remove 'mask' from the mask list, if it is not needed any more. */
@@ -513,10 +606,15 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
        struct table_instance *ti = ovsl_dereference(table->ti);
+       struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
 
        BUG_ON(table->count == 0);
-       hlist_del_rcu(&flow->hash_node[ti->node_ver]);
+       hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
        table->count--;
+       if (ovs_identifier_is_ufid(&flow->id)) {
+               hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
+               table->ufid_count--;
+       }
 
        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
         * accessible as long as the RCU read lock is held.
@@ -585,34 +683,64 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
 }
 
 /* Must be called with OVS mutex held. */
-int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
-                       const struct sw_flow_mask *mask)
+static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
 {
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;
-       int err;
-
-       err = flow_mask_insert(table, flow, mask);
-       if (err)
-               return err;
 
-       flow->hash = flow_hash(&flow->key, flow->mask->range.start,
-                       flow->mask->range.end);
+       flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;
 
        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
-               new_ti = table_instance_expand(ti);
+               new_ti = table_instance_expand(ti, false);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
-               new_ti = table_instance_rehash(ti, ti->n_buckets);
+               new_ti = table_instance_rehash(ti, ti->n_buckets, false);
 
        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
-               table_instance_destroy(ti, true);
+               call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                table->last_rehash = jiffies;
        }
+}
+
+/* Must be called with OVS mutex held. */
+static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
+{
+       struct table_instance *ti;
+
+       flow->ufid_table.hash = ufid_hash(&flow->id);
+       ti = ovsl_dereference(table->ufid_ti);
+       ufid_table_instance_insert(ti, flow);
+       table->ufid_count++;
+
+       /* Expand table, if necessary, to make room. */
+       if (table->ufid_count > ti->n_buckets) {
+               struct table_instance *new_ti;
+
+               new_ti = table_instance_expand(ti, true);
+               if (new_ti) {
+                       rcu_assign_pointer(table->ufid_ti, new_ti);
+                       call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+               }
+       }
+}
+
+/* Must be called with OVS mutex held. */
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+                       const struct sw_flow_mask *mask)
+{
+       int err;
+
+       err = flow_mask_insert(table, flow, mask);
+       if (err)
+               return err;
+       flow_key_insert(table, flow);
+       if (ovs_identifier_is_ufid(&flow->id))
+               flow_ufid_insert(table, flow);
+
        return 0;
 }
 
index 309fa6415689d89494af3762dc4c7caefa7db673..616eda10d9554d1e519d38b65f04352bc1f8796d 100644 (file)
@@ -47,9 +47,11 @@ struct table_instance {
 
 struct flow_table {
        struct table_instance __rcu *ti;
+       struct table_instance __rcu *ufid_ti;
        struct list_head mask_list;
        unsigned long last_rehash;
        unsigned int count;
+       unsigned int ufid_count;
 };
 
 extern struct kmem_cache *flow_stats_cache;
@@ -78,8 +80,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
                                    const struct sw_flow_key *);
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
                                          const struct sw_flow_match *match);
-bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
-                              const struct sw_flow_match *match);
+struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *,
+                                        const struct sw_flow_id *);
+
+bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
 
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       const struct sw_flow_mask *mask);
index 484864dd0e689290dfca8a2c204e984de1b33184..bf02fd5808c964eaefd12a2012f2f15500927c99 100644 (file)
@@ -9,8 +9,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/version.h>
-
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/net.h>
@@ -90,7 +88,7 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
 
        opts_len = geneveh->opt_len * 4;
 
-       flags = TUNNEL_KEY | TUNNEL_OPTIONS_PRESENT |
+       flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
                (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0) |
                (geneveh->oam ? TUNNEL_OAM : 0) |
                (geneveh->critical ? TUNNEL_CRIT_OPT : 0);
@@ -172,7 +170,7 @@ error:
 
 static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
-       struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ovs_key_ipv4_tunnel *tun_key;
        struct ovs_tunnel_info *tun_info;
        struct net *net = ovs_dp_get_net(vport->dp);
        struct geneve_port *geneve_port = geneve_vport(vport);
@@ -180,7 +178,7 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
        __be16 sport;
        struct rtable *rt;
        struct flowi4 fl;
-       u8 vni[3];
+       u8 vni[3], opts_len, *opts;
        __be16 df;
        int err;
 
@@ -191,16 +189,7 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
        }
 
        tun_key = &tun_info->tunnel;
-
-       /* Route lookup */
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = tun_key->ipv4_dst;
-       fl.saddr = tun_key->ipv4_src;
-       fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
-       fl.flowi4_mark = skb->mark;
-       fl.flowi4_proto = IPPROTO_UDP;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto error;
@@ -211,12 +200,19 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
        tunnel_id_to_vni(tun_key->tun_id, vni);
        skb->ignore_df = 1;
 
+       if (tun_key->tun_flags & TUNNEL_GENEVE_OPT) {
+               opts = (u8 *)tun_info->options;
+               opts_len = tun_info->options_len;
+       } else {
+               opts = NULL;
+               opts_len = 0;
+       }
+
        err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
                              tun_key->ipv4_dst, tun_key->ipv4_tos,
                              tun_key->ipv4_ttl, df, sport, dport,
-                             tun_key->tun_flags, vni,
-                             tun_info->options_len, (u8 *)tun_info->options,
-                             false);
+                             tun_key->tun_flags, vni, opts_len, opts,
+                             !!(tun_key->tun_flags & TUNNEL_CSUM), false);
        if (err < 0)
                ip_rt_put(rt);
        return err;
index d4168c442db5af8683c72bfb24ce95c789dadcf8..f17ac9642f4ee3cca4ce9bece9bafa32de786621 100644 (file)
@@ -134,7 +134,7 @@ static int gre_err(struct sk_buff *skb, u32 info,
 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
        struct net *net = ovs_dp_get_net(vport->dp);
-       struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ovs_key_ipv4_tunnel *tun_key;
        struct flowi4 fl;
        struct rtable *rt;
        int min_headroom;
@@ -148,15 +148,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
        }
 
        tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-       /* Route lookup */
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = tun_key->ipv4_dst;
-       fl.saddr = tun_key->ipv4_src;
-       fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
-       fl.flowi4_mark = skb->mark;
-       fl.flowi4_proto = IPPROTO_GRE;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto err_free_skb;
@@ -166,7 +158,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + tunnel_hlen + sizeof(struct iphdr)
-                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
index d7c46b301024906cf748453d24b91d2790f2e272..ff07d4062d601d6c6a0ba22930680143103a08ef 100644 (file)
@@ -40,6 +40,7 @@
 
 #include "datapath.h"
 #include "vport.h"
+#include "vport-vxlan.h"
 
 /**
  * struct vxlan_port - Keeps track of open UDP ports
@@ -49,6 +50,7 @@
 struct vxlan_port {
        struct vxlan_sock *vs;
        char name[IFNAMSIZ];
+       u32 exts; /* VXLAN_F_* in <net/vxlan.h> */
 };
 
 static struct vport_ops ovs_vxlan_vport_ops;
@@ -59,19 +61,30 @@ static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
 }
 
 /* Called with rcu_read_lock and BH disabled. */
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+                     struct vxlan_metadata *md)
 {
        struct ovs_tunnel_info tun_info;
+       struct vxlan_port *vxlan_port;
        struct vport *vport = vs->data;
        struct iphdr *iph;
+       struct ovs_vxlan_opts opts = {
+               .gbp = md->gbp,
+       };
        __be64 key;
+       __be16 flags;
+
+       flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
+       vxlan_port = vxlan_vport(vport);
+       if (vxlan_port->exts & VXLAN_F_GBP)
+               flags |= TUNNEL_VXLAN_OPT;
 
        /* Save outer tunnel values */
        iph = ip_hdr(skb);
-       key = cpu_to_be64(ntohl(vx_vni) >> 8);
+       key = cpu_to_be64(ntohl(md->vni) >> 8);
        ovs_flow_tun_info_init(&tun_info, iph,
                               udp_hdr(skb)->source, udp_hdr(skb)->dest,
-                              key, TUNNEL_KEY, NULL, 0);
+                              key, flags, &opts, sizeof(opts));
 
        ovs_vport_receive(vport, skb, &tun_info);
 }
@@ -83,6 +96,21 @@ static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
 
        if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
                return -EMSGSIZE;
+
+       if (vxlan_port->exts) {
+               struct nlattr *exts;
+
+               exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
+               if (!exts)
+                       return -EMSGSIZE;
+
+               if (vxlan_port->exts & VXLAN_F_GBP &&
+                   nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
+                       return -EMSGSIZE;
+
+               nla_nest_end(skb, exts);
+       }
+
        return 0;
 }
 
@@ -95,6 +123,31 @@ static void vxlan_tnl_destroy(struct vport *vport)
        ovs_vport_deferred_free(vport);
 }
 
+static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
+       [OVS_VXLAN_EXT_GBP]     = { .type = NLA_FLAG, },
+};
+
+static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
+{
+       struct nlattr *exts[OVS_VXLAN_EXT_MAX+1];
+       struct vxlan_port *vxlan_port;
+       int err;
+
+       if (nla_len(attr) < sizeof(struct nlattr))
+               return -EINVAL;
+
+       err = nla_parse_nested(exts, OVS_VXLAN_EXT_MAX, attr, exts_policy);
+       if (err < 0)
+               return err;
+
+       vxlan_port = vxlan_vport(vport);
+
+       if (exts[OVS_VXLAN_EXT_GBP])
+               vxlan_port->exts |= VXLAN_F_GBP;
+
+       return 0;
+}
+
 static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
 {
        struct net *net = ovs_dp_get_net(parms->dp);
@@ -127,7 +180,17 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
        vxlan_port = vxlan_vport(vport);
        strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
 
-       vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, 0);
+       a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
+       if (a) {
+               err = vxlan_configure_exts(vport, a);
+               if (err) {
+                       ovs_vport_free(vport);
+                       goto error;
+               }
+       }
+
+       vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true,
+                           vxlan_port->exts);
        if (IS_ERR(vs)) {
                ovs_vport_free(vport);
                return (void *)vs;
@@ -140,17 +203,34 @@ error:
        return ERR_PTR(err);
 }
 
+static int vxlan_ext_gbp(struct sk_buff *skb)
+{
+       const struct ovs_tunnel_info *tun_info;
+       const struct ovs_vxlan_opts *opts;
+
+       tun_info = OVS_CB(skb)->egress_tun_info;
+       opts = tun_info->options;
+
+       if (tun_info->tunnel.tun_flags & TUNNEL_VXLAN_OPT &&
+           tun_info->options_len >= sizeof(*opts))
+               return opts->gbp;
+       else
+               return 0;
+}
+
 static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
        struct net *net = ovs_dp_get_net(vport->dp);
        struct vxlan_port *vxlan_port = vxlan_vport(vport);
        __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
-       struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ovs_key_ipv4_tunnel *tun_key;
+       struct vxlan_metadata md = {0};
        struct rtable *rt;
        struct flowi4 fl;
        __be16 src_port;
        __be16 df;
        int err;
+       u32 vxflags;
 
        if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
                err = -EINVAL;
@@ -158,15 +238,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        }
 
        tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-       /* Route lookup */
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = tun_key->ipv4_dst;
-       fl.saddr = tun_key->ipv4_src;
-       fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
-       fl.flowi4_mark = skb->mark;
-       fl.flowi4_proto = IPPROTO_UDP;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto error;
@@ -178,13 +250,15 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        skb->ignore_df = 1;
 
        src_port = udp_flow_src_port(net, skb, 0, 0, true);
+       md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
+       md.gbp = vxlan_ext_gbp(skb);
+       vxflags = vxlan_port->exts |
+                     (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
 
-       err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
-                            fl.saddr, tun_key->ipv4_dst,
+       err = vxlan_xmit_skb(rt, skb, fl.saddr, tun_key->ipv4_dst,
                             tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
                             src_port, dst_port,
-                            htonl(be64_to_cpu(tun_key->tun_id) << 8),
-                            false);
+                            &md, false, vxflags);
        if (err < 0)
                ip_rt_put(rt);
        return err;
diff --git a/net/openvswitch/vport-vxlan.h b/net/openvswitch/vport-vxlan.h
new file mode 100644 (file)
index 0000000..4b08233
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef VPORT_VXLAN_H
+#define VPORT_VXLAN_H 1
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct ovs_vxlan_opts {
+       __u32 gbp;
+};
+
+#endif
index 53f3ebbfceabcb3d0b90cb1508c295c51da49eeb..ec2954ffc690c612eb1b04b018134ba0f52ba5c8 100644 (file)
@@ -480,7 +480,8 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
        stats = this_cpu_ptr(vport->percpu_stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
-       stats->rx_bytes += skb->len;
+       stats->rx_bytes += skb->len +
+                          (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
        u64_stats_update_end(&stats->syncp);
 
        OVS_CB(skb)->input_vport = vport;
@@ -594,14 +595,7 @@ int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
         * The process may need to be changed if the corresponding process
         * in vports ops changed.
         */
-       memset(&fl, 0, sizeof(fl));
-       fl.daddr = tun_key->ipv4_dst;
-       fl.saddr = tun_key->ipv4_src;
-       fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
-       fl.flowi4_mark = skb_mark;
-       fl.flowi4_proto = ipproto;
-
-       rt = ip_route_output_key(net, &fl);
+       rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
 
index 99c8e71d9e6cb1d2d73bf76829b123811359ed39..f8ae295fb0011f7cc5dea75737833b7086641c77 100644 (file)
@@ -236,4 +236,22 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
 int ovs_vport_ops_register(struct vport_ops *ops);
 void ovs_vport_ops_unregister(struct vport_ops *ops);
 
+static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
+                                                    const struct ovs_key_ipv4_tunnel *key,
+                                                    u32 mark,
+                                                    struct flowi4 *fl,
+                                                    u8 protocol)
+{
+       struct rtable *rt;
+
+       memset(fl, 0, sizeof(*fl));
+       fl->daddr = key->ipv4_dst;
+       fl->saddr = key->ipv4_src;
+       fl->flowi4_tos = RT_TOS(key->ipv4_tos);
+       fl->flowi4_mark = mark;
+       fl->flowi4_proto = protocol;
+
+       rt = ip_route_output_key(net, fl);
+       return rt;
+}
 #endif /* vport.h */
index 6880f34a529a56510b1145e14d41ed04803dd6a7..9c28cec1a0838ecf8ea03ceff77fb301c5a425a7 100644 (file)
@@ -986,8 +986,8 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
 {
-       if (vlan_tx_tag_present(pkc->skb)) {
-               ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
+       if (skb_vlan_tag_present(pkc->skb)) {
+               ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
                ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
                ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
        } else {
@@ -2000,8 +2000,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                h.h2->tp_net = netoff;
                h.h2->tp_sec = ts.tv_sec;
                h.h2->tp_nsec = ts.tv_nsec;
-               if (vlan_tx_tag_present(skb)) {
-                       h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
+               if (skb_vlan_tag_present(skb)) {
+                       h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
                        h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
                        status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
                } else {
@@ -2102,7 +2102,7 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
 {
        /* net device doesn't like empty head */
        if (unlikely(len <= dev->hard_header_len)) {
-               net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
+               net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
                                     current->comm, len, dev->hard_header_len);
                return true;
        }
@@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        err = -EINVAL;
        if (sock->type == SOCK_DGRAM) {
                offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
-               if (unlikely(offset) < 0)
+               if (unlikely(offset < 0))
                        goto out_free;
        } else {
                if (ll_header_truncated(dev, len))
@@ -3010,8 +3010,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
                aux.tp_snaplen = skb->len;
                aux.tp_mac = 0;
                aux.tp_net = skb_network_offset(skb);
-               if (vlan_tx_tag_present(skb)) {
-                       aux.tp_vlan_tci = vlan_tx_tag_get(skb);
+               if (skb_vlan_tag_present(skb)) {
+                       aux.tp_vlan_tci = skb_vlan_tag_get(skb);
                        aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
                        aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
                } else {
index 92f2c7107eec4f307cc50cdfedfb4ea2db0e59de..0ed68f0238bf9416f1cc92928b61a385a1055886 100644 (file)
@@ -177,7 +177,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                                     PACKET_DIAG_FILTER))
                goto out_nlmsg_trim;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 out_nlmsg_trim:
        nlmsg_cancel(skb, nlh);
index b64151ade6b33a9cbacb0980d3ddbe03d8f7b4c8..bc5ee5fbe6ae3845b6ec67eaea2beaec2d12ba08 100644 (file)
@@ -121,7 +121,8 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
        ifm->ifa_index = dev->ifindex;
        if (nla_put_u8(skb, IFA_LOCAL, addr))
                goto nla_put_failure;
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -190,7 +191,8 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
        if (nla_put_u8(skb, RTA_DST, dst) ||
            nla_put_u32(skb, RTA_OIF, dev->ifindex))
                goto nla_put_failure;
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 nla_put_failure:
        nlmsg_cancel(skb, nlh);
@@ -270,27 +272,23 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
-       u8 addr, addr_idx = 0, addr_start_idx = cb->args[0];
+       u8 addr;
 
        rcu_read_lock();
-       for (addr = 0; addr < 64; addr++) {
-               struct net_device *dev;
+       for (addr = cb->args[0]; addr < 64; addr++) {
+               struct net_device *dev = phonet_route_get_rcu(net, addr << 2);
 
-               dev = phonet_route_get_rcu(net, addr << 2);
                if (!dev)
                        continue;
 
-               if (addr_idx++ < addr_start_idx)
-                       continue;
                if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid,
-                               cb->nlh->nlmsg_seq, RTM_NEWROUTE))
+                              cb->nlh->nlmsg_seq, RTM_NEWROUTE) < 0)
                        goto out;
        }
 
 out:
        rcu_read_unlock();
-       cb->args[0] = addr_idx;
-       cb->args[1] = 0;
+       cb->args[0] = addr;
 
        return skb->len;
 }
index 1dde91e3dc7033c575dcfc041a23402f98e52239..bd3825d38abc923bd905b6af266b4fffe706f427 100644 (file)
@@ -409,7 +409,7 @@ try_again:
        posted = IB_GET_POST_CREDITS(oldval);
        avail = IB_GET_SEND_CREDITS(oldval);
 
-       rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
+       rdsdebug("wanted=%u credits=%u posted=%u\n",
                        wanted, avail, posted);
 
        /* The last credit must be used to send a credit update. */
@@ -453,7 +453,7 @@ void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
        if (credits == 0)
                return;
 
-       rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
+       rdsdebug("credits=%u current=%u%s\n",
                        credits,
                        IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
                        test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
index a91e1db62ee6a1833e65372989b0644d980899e6..a6c2bea9f8f9b37b46ce381336a90fb685187083 100644 (file)
@@ -590,8 +590,8 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
                        /* Actually this may happen quite frequently, when
                         * an outgoing connect raced with an incoming connect.
                         */
-                       rdsdebug("rds_iw_conn_shutdown: failed to disconnect,"
-                                  " cm: %p err %d\n", ic->i_cm_id, err);
+                       rdsdebug("failed to disconnect, cm: %p err %d\n",
+                                ic->i_cm_id, err);
                }
 
                if (ic->i_cm_id->qp) {
index 9105ea03aec5dc05bad0221eb00c4794eb6e463a..13834780a3089e9e640e470f7b2e8b26c6334b7b 100644 (file)
@@ -361,7 +361,7 @@ try_again:
        posted = IB_GET_POST_CREDITS(oldval);
        avail = IB_GET_SEND_CREDITS(oldval);
 
-       rdsdebug("rds_iw_send_grab_credits(%u): credits=%u posted=%u\n",
+       rdsdebug("wanted=%u credits=%u posted=%u\n",
                        wanted, avail, posted);
 
        /* The last credit must be used to send a credit update. */
@@ -405,7 +405,7 @@ void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits)
        if (credits == 0)
                return;
 
-       rdsdebug("rds_iw_send_add_credits(%u): current=%u%s\n",
+       rdsdebug("credits=%u current=%u%s\n",
                        credits,
                        IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
                        test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
index 5a21e6f5986f1b822e41077f54f5f6235fe47068..756c73729126d45c18a29bd5859aa04596a1ed0f 100644 (file)
@@ -266,7 +266,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 
 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
 {
-       unsigned long to_copy;
+       unsigned long to_copy, nbytes;
        unsigned long sg_off;
        struct scatterlist *sg;
        int ret = 0;
@@ -293,9 +293,9 @@ int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
                                sg->length - sg_off);
 
                rds_stats_add(s_copy_from_user, to_copy);
-               ret = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
-                                         to_copy, from);
-               if (ret != to_copy)
+               nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
+                                            to_copy, from);
+               if (nbytes != to_copy)
                        return -EFAULT;
 
                sg_off += to_copy;
index c3b0cd43eb56689e395581c4757402bad531e271..c173f69e1479bfaf643b9e5c69f4c9a151b18c67 100644 (file)
@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
        {
                .procname       = "max_unacked_packets",
                .data           = &rds_sysctl_max_unacked_packets,
-               .maxlen         = sizeof(unsigned long),
+               .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "max_unacked_bytes",
                .data           = &rds_sysctl_max_unacked_bytes,
-               .maxlen         = sizeof(unsigned long),
+               .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
index 3f4a0bbeed3da2bfe2863153dc44767f47e43686..d978f2f46ff35e0181e2a833e3bfab2b6d58221e 100644 (file)
@@ -170,6 +170,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
        { "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
+       { "BCM2E40", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E64", RFKILL_TYPE_BLUETOOTH },
        { "BCM4752", RFKILL_TYPE_GPS },
        { "LNV4752", RFKILL_TYPE_GPS },
index 74c0fcd36838dfc7326eeddb7726483576da0413..5394b6be46ecd5ebb6c677b745a4848977433f70 100644 (file)
@@ -42,6 +42,11 @@ void rxrpc_UDP_error_report(struct sock *sk)
                _leave("UDP socket errqueue empty");
                return;
        }
+       if (!skb->len) {
+               _leave("UDP empty message");
+               kfree_skb(skb);
+               return;
+       }
 
        rxrpc_new_skb(skb);
 
index e1a9373e59799fd2a9cd998fbdc4399d2d021f6a..8331c95e152283d437b3dee9205cf37c9ce06271 100644 (file)
@@ -232,10 +232,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
                   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
                ret = -EPROTO; /* request phase complete for this client call */
        } else {
-               mm_segment_t oldfs = get_fs();
-               set_fs(KERNEL_DS);
                ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
-               set_fs(oldfs);
        }
 
        release_sock(&call->socket->sk);
@@ -529,13 +526,11 @@ static int rxrpc_send_data(struct kiocb *iocb,
                           struct msghdr *msg, size_t len)
 {
        struct rxrpc_skb_priv *sp;
-       unsigned char __user *from;
        struct sk_buff *skb;
-       const struct iovec *iov;
        struct sock *sk = &rx->sk;
        long timeo;
        bool more;
-       int ret, ioc, segment, copied;
+       int ret, copied;
 
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
@@ -545,25 +540,17 @@ static int rxrpc_send_data(struct kiocb *iocb,
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                return -EPIPE;
 
-       iov = msg->msg_iter.iov;
-       ioc = msg->msg_iter.nr_segs - 1;
-       from = iov->iov_base;
-       segment = iov->iov_len;
-       iov++;
        more = msg->msg_flags & MSG_MORE;
 
        skb = call->tx_pending;
        call->tx_pending = NULL;
 
        copied = 0;
-       do {
+       if (len > iov_iter_count(&msg->msg_iter))
+               len = iov_iter_count(&msg->msg_iter);
+       while (len) {
                int copy;
 
-               if (segment > len)
-                       segment = len;
-
-               _debug("SEGMENT %d @%p", segment, from);
-
                if (!skb) {
                        size_t size, chunk, max, space;
 
@@ -631,13 +618,13 @@ static int rxrpc_send_data(struct kiocb *iocb,
                /* append next segment of data to the current buffer */
                copy = skb_tailroom(skb);
                ASSERTCMP(copy, >, 0);
-               if (copy > segment)
-                       copy = segment;
+               if (copy > len)
+                       copy = len;
                if (copy > sp->remain)
                        copy = sp->remain;
 
                _debug("add");
-               ret = skb_add_data(skb, from, copy);
+               ret = skb_add_data(skb, &msg->msg_iter, copy);
                _debug("added");
                if (ret < 0)
                        goto efault;
@@ -646,18 +633,6 @@ static int rxrpc_send_data(struct kiocb *iocb,
                copied += copy;
 
                len -= copy;
-               segment -= copy;
-               from += copy;
-               while (segment == 0 && ioc > 0) {
-                       from = iov->iov_base;
-                       segment = iov->iov_len;
-                       iov++;
-                       ioc--;
-               }
-               if (len == 0) {
-                       segment = 0;
-                       ioc = 0;
-               }
 
                /* check for the far side aborting the call or a network error
                 * occurring */
@@ -665,7 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
                        goto call_aborted;
 
                /* add the packet to the send queue if it's now full */
-               if (sp->remain <= 0 || (segment == 0 && !more)) {
+               if (sp->remain <= 0 || (!len && !more)) {
                        struct rxrpc_connection *conn = call->conn;
                        uint32_t seq;
                        size_t pad;
@@ -711,11 +686,10 @@ static int rxrpc_send_data(struct kiocb *iocb,
 
                        memcpy(skb->head, &sp->hdr,
                               sizeof(struct rxrpc_header));
-                       rxrpc_queue_packet(call, skb, segment == 0 && !more);
+                       rxrpc_queue_packet(call, skb, !iov_iter_count(&msg->msg_iter) && !more);
                        skb = NULL;
                }
-
-       } while (segment > 0);
+       }
 
 success:
        ret = copied;
index c54c9d9d1ffb814e7f4ead7e1db3dbf6c4a9adb7..899d0319f2b273e47a73efe4caac7c14bfdfe219 100644 (file)
@@ -698,6 +698,30 @@ config NET_ACT_VLAN
          To compile this code as a module, choose M here: the
          module will be called act_vlan.
 
+config NET_ACT_BPF
+        tristate "BPF based action"
+        depends on NET_CLS_ACT
+        ---help---
+         Say Y here to execute BPF code on packets. The BPF code will decide
+         if the packet should be dropped or not.
+
+         If unsure, say N.
+
+         To compile this code as a module, choose M here: the
+         module will be called act_bpf.
+
+config NET_ACT_CONNMARK
+        tristate "Netfilter Connection Mark Retriever"
+        depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
+        depends on NF_CONNTRACK && NF_CONNTRACK_MARK
+        ---help---
+         Say Y here to allow retrieving of conn mark
+
+         If unsure, say N.
+
+         To compile this code as a module, choose M here: the
+         module will be called act_connmark.
+
 config NET_CLS_IND
        bool "Incoming device classification"
        depends on NET_CLS_U32 || NET_CLS_FW
index 679f24ae7f933d59298be54ca96b4728b9f7280b..7ca7f4c1b8c210c9358252c61fc18fff12e3f5db 100644 (file)
@@ -17,6 +17,8 @@ obj-$(CONFIG_NET_ACT_SIMP)    += act_simple.o
 obj-$(CONFIG_NET_ACT_SKBEDIT)  += act_skbedit.o
 obj-$(CONFIG_NET_ACT_CSUM)     += act_csum.o
 obj-$(CONFIG_NET_ACT_VLAN)     += act_vlan.o
+obj-$(CONFIG_NET_ACT_BPF)      += act_bpf.o
+obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o
 obj-$(CONFIG_NET_SCH_FIFO)     += sch_fifo.o
 obj-$(CONFIG_NET_SCH_CBQ)      += sch_cbq.o
 obj-$(CONFIG_NET_SCH_HTB)      += sch_htb.o
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
new file mode 100644 (file)
index 0000000..82c5d7f
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/filter.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+#include <linux/tc_act/tc_bpf.h>
+#include <net/tc_act/tc_bpf.h>
+
+#define BPF_TAB_MASK     15
+
+static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
+                  struct tcf_result *res)
+{
+       struct tcf_bpf *b = a->priv;
+       int action;
+       int filter_res;
+
+       spin_lock(&b->tcf_lock);
+       b->tcf_tm.lastuse = jiffies;
+       bstats_update(&b->tcf_bstats, skb);
+       action = b->tcf_action;
+
+       filter_res = BPF_PROG_RUN(b->filter, skb);
+       if (filter_res == 0) {
+               /* Return code 0 from the BPF program
+                * is being interpreted as a drop here.
+                */
+               action = TC_ACT_SHOT;
+               b->tcf_qstats.drops++;
+       }
+
+       spin_unlock(&b->tcf_lock);
+       return action;
+}
+
+static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *a,
+                       int bind, int ref)
+{
+       unsigned char *tp = skb_tail_pointer(skb);
+       struct tcf_bpf *b = a->priv;
+       struct tc_act_bpf opt = {
+               .index    = b->tcf_index,
+               .refcnt   = b->tcf_refcnt - ref,
+               .bindcnt  = b->tcf_bindcnt - bind,
+               .action   = b->tcf_action,
+       };
+       struct tcf_t t;
+       struct nlattr *nla;
+
+       if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
+
+       if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, b->bpf_num_ops))
+               goto nla_put_failure;
+
+       nla = nla_reserve(skb, TCA_ACT_BPF_OPS, b->bpf_num_ops *
+                         sizeof(struct sock_filter));
+       if (!nla)
+               goto nla_put_failure;
+
+       memcpy(nla_data(nla), b->bpf_ops, nla_len(nla));
+
+       t.install = jiffies_to_clock_t(jiffies - b->tcf_tm.install);
+       t.lastuse = jiffies_to_clock_t(jiffies - b->tcf_tm.lastuse);
+       t.expires = jiffies_to_clock_t(b->tcf_tm.expires);
+       if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(t), &t))
+               goto nla_put_failure;
+       return skb->len;
+
+nla_put_failure:
+       nlmsg_trim(skb, tp);
+       return -1;
+}
+
+static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
+       [TCA_ACT_BPF_PARMS]     = { .len = sizeof(struct tc_act_bpf) },
+       [TCA_ACT_BPF_OPS_LEN]   = { .type = NLA_U16 },
+       [TCA_ACT_BPF_OPS]       = { .type = NLA_BINARY,
+                                   .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
+};
+
+static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+                       struct nlattr *est, struct tc_action *a,
+                       int ovr, int bind)
+{
+       struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+       struct tc_act_bpf *parm;
+       struct tcf_bpf *b;
+       u16 bpf_size, bpf_num_ops;
+       struct sock_filter *bpf_ops;
+       struct sock_fprog_kern tmp;
+       struct bpf_prog *fp;
+       int ret;
+
+       if (!nla)
+               return -EINVAL;
+
+       ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
+       if (ret < 0)
+               return ret;
+
+       if (!tb[TCA_ACT_BPF_PARMS] ||
+           !tb[TCA_ACT_BPF_OPS_LEN] || !tb[TCA_ACT_BPF_OPS])
+               return -EINVAL;
+       parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
+
+       bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
+       if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
+               return -EINVAL;
+
+       bpf_size = bpf_num_ops * sizeof(*bpf_ops);
+       if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
+               return -EINVAL;
+
+       bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
+       if (!bpf_ops)
+               return -ENOMEM;
+
+       memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size);
+
+       tmp.len = bpf_num_ops;
+       tmp.filter = bpf_ops;
+
+       ret = bpf_prog_create(&fp, &tmp);
+       if (ret)
+               goto free_bpf_ops;
+
+       if (!tcf_hash_check(parm->index, a, bind)) {
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*b), bind);
+               if (ret)
+                       goto destroy_fp;
+
+               ret = ACT_P_CREATED;
+       } else {
+               if (bind)
+                       goto destroy_fp;
+               tcf_hash_release(a, bind);
+               if (!ovr) {
+                       ret = -EEXIST;
+                       goto destroy_fp;
+               }
+       }
+
+       b = to_bpf(a);
+       spin_lock_bh(&b->tcf_lock);
+       b->tcf_action = parm->action;
+       b->bpf_num_ops = bpf_num_ops;
+       b->bpf_ops = bpf_ops;
+       b->filter = fp;
+       spin_unlock_bh(&b->tcf_lock);
+
+       if (ret == ACT_P_CREATED)
+               tcf_hash_insert(a);
+       return ret;
+
+destroy_fp:
+       bpf_prog_destroy(fp);
+free_bpf_ops:
+       kfree(bpf_ops);
+       return ret;
+}
+
+static void tcf_bpf_cleanup(struct tc_action *a, int bind)
+{
+       struct tcf_bpf *b = a->priv;
+
+       bpf_prog_destroy(b->filter);
+}
+
+static struct tc_action_ops act_bpf_ops = {
+       .kind =         "bpf",
+       .type =         TCA_ACT_BPF,
+       .owner =        THIS_MODULE,
+       .act =          tcf_bpf,
+       .dump =         tcf_bpf_dump,
+       .cleanup =      tcf_bpf_cleanup,
+       .init =         tcf_bpf_init,
+};
+
+static int __init bpf_init_module(void)
+{
+       return tcf_register_action(&act_bpf_ops, BPF_TAB_MASK);
+}
+
+static void __exit bpf_cleanup_module(void)
+{
+       tcf_unregister_action(&act_bpf_ops);
+}
+
+module_init(bpf_init_module);
+module_exit(bpf_cleanup_module);
+
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_DESCRIPTION("TC BPF based action");
+MODULE_LICENSE("GPL v2");
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
new file mode 100644 (file)
index 0000000..8e47251
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ * net/sched/act_connmark.c  netfilter connmark retriever action
+ * skb mark is over-written
+ *
+ * Copyright (c) 2011 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_cls.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/act_api.h>
+#include <uapi/linux/tc_act/tc_connmark.h>
+#include <net/tc_act/tc_connmark.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+#define CONNMARK_TAB_MASK     3
+
+static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
+                       struct tcf_result *res)
+{
+       const struct nf_conntrack_tuple_hash *thash;
+       struct nf_conntrack_tuple tuple;
+       enum ip_conntrack_info ctinfo;
+       struct tcf_connmark_info *ca = a->priv;
+       struct nf_conn *c;
+       int proto;
+
+       spin_lock(&ca->tcf_lock);
+       ca->tcf_tm.lastuse = jiffies;
+       bstats_update(&ca->tcf_bstats, skb);
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               if (skb->len < sizeof(struct iphdr))
+                       goto out;
+
+               proto = NFPROTO_IPV4;
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               if (skb->len < sizeof(struct ipv6hdr))
+                       goto out;
+
+               proto = NFPROTO_IPV6;
+       } else {
+               goto out;
+       }
+
+       c = nf_ct_get(skb, &ctinfo);
+       if (c) {
+               skb->mark = c->mark;
+               /* using overlimits stats to count how many packets marked */
+               ca->tcf_qstats.overlimits++;
+               nf_ct_put(c);
+               goto out;
+       }
+
+       if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+                              proto, &tuple))
+               goto out;
+
+       thash = nf_conntrack_find_get(dev_net(skb->dev), ca->zone, &tuple);
+       if (!thash)
+               goto out;
+
+       c = nf_ct_tuplehash_to_ctrack(thash);
+       /* using overlimits stats to count how many packets marked */
+       ca->tcf_qstats.overlimits++;
+       skb->mark = c->mark;
+       nf_ct_put(c);
+
+out:
+       skb->nfct = NULL;
+       spin_unlock(&ca->tcf_lock);
+       return ca->tcf_action;
+}
+
+static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
+       [TCA_CONNMARK_PARMS] = { .len = sizeof(struct tc_connmark) },
+};
+
+static int tcf_connmark_init(struct net *net, struct nlattr *nla,
+                            struct nlattr *est, struct tc_action *a,
+                            int ovr, int bind)
+{
+       struct nlattr *tb[TCA_CONNMARK_MAX + 1];
+       struct tcf_connmark_info *ci;
+       struct tc_connmark *parm;
+       int ret = 0;
+
+       if (!nla)
+               return -EINVAL;
+
+       ret = nla_parse_nested(tb, TCA_CONNMARK_MAX, nla, connmark_policy);
+       if (ret < 0)
+               return ret;
+
+       parm = nla_data(tb[TCA_CONNMARK_PARMS]);
+
+       if (!tcf_hash_check(parm->index, a, bind)) {
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
+               if (ret)
+                       return ret;
+
+               ci = to_connmark(a);
+               ci->tcf_action = parm->action;
+               ci->zone = parm->zone;
+
+               tcf_hash_insert(a);
+               ret = ACT_P_CREATED;
+       } else {
+               ci = to_connmark(a);
+               if (bind)
+                       return 0;
+               tcf_hash_release(a, bind);
+               if (!ovr)
+                       return -EEXIST;
+               /* replacing action and zone */
+               ci->tcf_action = parm->action;
+               ci->zone = parm->zone;
+       }
+
+       return ret;
+}
+
+static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
+                                   int bind, int ref)
+{
+       unsigned char *b = skb_tail_pointer(skb);
+       struct tcf_connmark_info *ci = a->priv;
+
+       struct tc_connmark opt = {
+               .index   = ci->tcf_index,
+               .refcnt  = ci->tcf_refcnt - ref,
+               .bindcnt = ci->tcf_bindcnt - bind,
+               .action  = ci->tcf_action,
+               .zone   = ci->zone,
+       };
+       struct tcf_t t;
+
+       if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
+
+       t.install = jiffies_to_clock_t(jiffies - ci->tcf_tm.install);
+       t.lastuse = jiffies_to_clock_t(jiffies - ci->tcf_tm.lastuse);
+       t.expires = jiffies_to_clock_t(ci->tcf_tm.expires);
+       if (nla_put(skb, TCA_CONNMARK_TM, sizeof(t), &t))
+               goto nla_put_failure;
+
+       return skb->len;
+nla_put_failure:
+       nlmsg_trim(skb, b);
+       return -1;
+}
+
+static struct tc_action_ops act_connmark_ops = {
+       .kind           =       "connmark",
+       .type           =       TCA_ACT_CONNMARK,
+       .owner          =       THIS_MODULE,
+       .act            =       tcf_connmark,
+       .dump           =       tcf_connmark_dump,
+       .init           =       tcf_connmark_init,
+};
+
+static int __init connmark_init_module(void)
+{
+       return tcf_register_action(&act_connmark_ops, CONNMARK_TAB_MASK);
+}
+
+static void __exit connmark_cleanup_module(void)
+{
+       tcf_unregister_action(&act_connmark_ops);
+}
+
+module_init(connmark_init_module);
+module_exit(connmark_cleanup_module);
+MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
+MODULE_DESCRIPTION("Connection tracking mark restoring");
+MODULE_LICENSE("GPL");
+
index edbf40dac709df3e88f33948a1c77800dbe25b75..4cd5cf1aedf8b14bc8a8fb0529db868ee74433fd 100644 (file)
@@ -509,7 +509,7 @@ static int tcf_csum(struct sk_buff *skb,
        if (unlikely(action == TC_ACT_SHOT))
                goto drop;
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case cpu_to_be16(ETH_P_IP):
                if (!tcf_csum_ipv4(skb, update_flags))
                        goto drop;
index aad6a679fb135e9c6492a1b1cd3c1b638d823453..baef987fe2c036ae61f7108455ce1d828ec40e6c 100644 (file)
@@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 }
 EXPORT_SYMBOL(tcf_exts_change);
 
-#define tcf_exts_first_act(ext) \
-               list_first_entry(&(exts)->actions, struct tc_action, list)
+#define tcf_exts_first_act(ext)                                        \
+       list_first_entry_or_null(&(exts)->actions,              \
+                                struct tc_action, list)
 
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
@@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
        struct tc_action *a = tcf_exts_first_act(exts);
-       if (tcf_action_copy_stats(skb, a, 1) < 0)
+       if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
                return -1;
 #endif
        return 0;
index 5aed341406c2366ed5c7b50c54f113e21bd27fde..fc399db86f11b17cb05536df8f211fe79c7a3232 100644 (file)
@@ -65,9 +65,12 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
        if (head == NULL)
                return 0UL;
 
-       list_for_each_entry(f, &head->flist, link)
-               if (f->handle == handle)
+       list_for_each_entry(f, &head->flist, link) {
+               if (f->handle == handle) {
                        l = (unsigned long) f;
+                       break;
+               }
+       }
 
        return l;
 }
index 84c8219c3e1ce18867466786ae061d13178d7519..5f3ee9e4b5bf539e97f9195ceb904dc419634302 100644 (file)
@@ -37,7 +37,7 @@ struct cls_bpf_prog {
        struct tcf_result res;
        struct list_head link;
        u32 handle;
-       u16 bpf_len;
+       u16 bpf_num_ops;
        struct tcf_proto *tp;
        struct rcu_head rcu;
 };
@@ -160,7 +160,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
        struct tcf_exts exts;
        struct sock_fprog_kern tmp;
        struct bpf_prog *fp;
-       u16 bpf_size, bpf_len;
+       u16 bpf_size, bpf_num_ops;
        u32 classid;
        int ret;
 
@@ -173,13 +173,18 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                return ret;
 
        classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
-       bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
-       if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
+       bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
+       if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) {
+               ret = -EINVAL;
+               goto errout;
+       }
+
+       bpf_size = bpf_num_ops * sizeof(*bpf_ops);
+       if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
                ret = -EINVAL;
                goto errout;
        }
 
-       bpf_size = bpf_len * sizeof(*bpf_ops);
        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL) {
                ret = -ENOMEM;
@@ -188,14 +193,14 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 
        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
 
-       tmp.len = bpf_len;
+       tmp.len = bpf_num_ops;
        tmp.filter = bpf_ops;
 
        ret = bpf_prog_create(&fp, &tmp);
        if (ret)
                goto errout_free;
 
-       prog->bpf_len = bpf_len;
+       prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_ops = bpf_ops;
        prog->filter = fp;
        prog->res.classid = classid;
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
 {
        unsigned int i = 0x80000000;
+       u32 handle;
 
        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));
-       if (i == 0)
+
+       if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
+               handle = 0;
+       } else {
+               handle = head->hgen;
+       }
 
-       return i;
+       return handle;
 }
 
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
@@ -303,10 +314,10 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 
        if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;
-       if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
+       if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
                goto nla_put_failure;
 
-       nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
+       nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                goto nla_put_failure;
index 15d68f24a521a8ec66fc42691f031d864f589416..461410394d085917ee8b8039ab5cfd5f5a6cf7c3 100644 (file)
@@ -77,7 +77,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        if (flow->dst)
                return ntohl(flow->dst);
-       return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
+       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
 }
 
 static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
@@ -98,7 +98,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys
        if (flow->ports)
                return ntohs(flow->port16[1]);
 
-       return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
+       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
 }
 
 static u32 flow_get_iif(const struct sk_buff *skb)
@@ -144,7 +144,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 
 static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, src.u3.ip));
        case htons(ETH_P_IPV6):
@@ -156,7 +156,7 @@ fallback:
 
 static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, dst.u3.ip));
        case htons(ETH_P_IPV6):
index 5b4a4efe468c927a33fe24e187f5f42ff09c770f..a3d79c8bf3b873b52163e938585bc01957bc6956 100644 (file)
@@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
        struct net_device *dev, *indev = NULL;
        int ret, network_offset;
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                acpar.family = NFPROTO_IPV4;
                if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
index c8f8c399b99a356835e5713d6b846fe814d0157b..b5294ce20cd467063721cea69348cb35d7999921 100644 (file)
@@ -176,7 +176,7 @@ META_COLLECTOR(int_vlan_tag)
 {
        unsigned short tag;
 
-       tag = vlan_tx_tag_get(skb);
+       tag = skb_vlan_tag_get(skb);
        if (!tag && __vlan_get_tag(skb, &tag))
                *err = -1;
        else
@@ -197,7 +197,7 @@ META_COLLECTOR(int_priority)
 META_COLLECTOR(int_protocol)
 {
        /* Let userspace take care of the byte ordering */
-       dst->value = skb->protocol;
+       dst->value = tc_skb_protocol(skb);
 }
 
 META_COLLECTOR(int_pkttype)
index 76f402e05bd6f7a5ea50653629855039e6940d71..243b7d169d6183f662ab7f30d0e93492b29e79e3 100644 (file)
@@ -1807,7 +1807,7 @@ done:
 int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
                       struct tcf_result *res)
 {
-       __be16 protocol = skb->protocol;
+       __be16 protocol = tc_skb_protocol(skb);
        int err;
 
        for (; tp; tp = rcu_dereference_bh(tp->next)) {
index 227114f27f9408b6010c9c537236ce9f7cb484d9..66700a6116aa98332e4996366378a633b9da552a 100644 (file)
@@ -203,7 +203,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
 
        if (p->set_tc_index) {
-               switch (skb->protocol) {
+               switch (tc_skb_protocol(skb)) {
                case htons(ETH_P_IP):
                        if (skb_cow_head(skb, sizeof(struct iphdr)))
                                goto drop;
@@ -289,7 +289,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
        index = skb->tc_index & (p->indices - 1);
        pr_debug("index %d->%d\n", skb->tc_index, index);
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
                                    p->value[index]);
@@ -306,7 +306,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
                 */
                if (p->mask[index] != 0xff || p->value[index])
                        pr_warn("%s: unsupported protocol %d\n",
-                               __func__, ntohs(skb->protocol));
+                               __func__, ntohs(tc_skb_protocol(skb)));
                break;
        }
 
index 9b05924cc386ecc2cdb9816be27e439637fb37b3..dfcea20e31711288aea660add30248b442769979 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
  *
- *  Copyright (C) 2013 Eric Dumazet <edumazet@google.com>
+ *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License
@@ -52,6 +52,7 @@
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
+#include <net/tcp.h>
 
 /*
  * Per flow structure, dynamically allocated
@@ -92,6 +93,7 @@ struct fq_sched_data {
        u32             flow_refill_delay;
        u32             flow_max_rate;  /* optional max rate per flow */
        u32             flow_plimit;    /* max packets per flow */
+       u32             orphan_mask;    /* mask for orphaned skb */
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;
@@ -222,11 +224,20 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;
 
-       if (unlikely(!sk)) {
+       /* SYNACK messages are attached to a listener socket.
+        * 1) They are not part of a 'flow' yet
+        * 2) We do not want to rate limit them (eg SYNFLOOD attack),
+        *    especially if the listener set SO_MAX_PACING_RATE
+        * 3) We pretend they are orphaned
+        */
+       if (!sk || sk->sk_state == TCP_LISTEN) {
+               unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
+
                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
-               sk = (struct sock *)(skb_get_hash(skb) | 1L);
+               sk = (struct sock *)((hash << 1) | 1UL);
+               skb_orphan(skb);
        }
 
        root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];
@@ -445,7 +456,9 @@ begin:
                goto begin;
        }
 
-       if (unlikely(f->head && now < f->time_next_packet)) {
+       skb = f->head;
+       if (unlikely(skb && now < f->time_next_packet &&
+                    !skb_is_tcp_pure_ack(skb))) {
                head->first = f->next;
                fq_flow_set_throttled(q, f);
                goto begin;
@@ -464,14 +477,17 @@ begin:
                goto begin;
        }
        prefetch(&skb->end);
-       f->time_next_packet = now;
        f->credit -= qdisc_pkt_len(skb);
 
        if (f->credit > 0 || !q->rate_enable)
                goto out;
 
+       /* Do not pace locally generated ack packets */
+       if (skb_is_tcp_pure_ack(skb))
+               goto out;
+
        rate = q->flow_max_rate;
-       if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+       if (skb->sk)
                rate = min(skb->sk->sk_pacing_rate, rate);
 
        if (rate != ~0U) {
@@ -670,8 +686,14 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
 
-       if (tb[TCA_FQ_QUANTUM])
-               q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+       if (tb[TCA_FQ_QUANTUM]) {
+               u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+
+               if (quantum > 0)
+                       q->quantum = quantum;
+               else
+                       err = -EINVAL;
+       }
 
        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
@@ -698,6 +720,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }
 
+       if (tb[TCA_FQ_ORPHAN_MASK])
+               q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
+
        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
@@ -743,6 +768,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
+       q->orphan_mask          = 1024 - 1;
        qdisc_watchdog_init(&q->watchdog, sch);
 
        if (opt)
@@ -772,6 +798,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
+           nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;
 
index 6ada42396a242123fd7738309b79720d0b46b38e..e02687185a594116b125fdbfc7db49bf8a929797 100644 (file)
@@ -122,13 +122,6 @@ teql_peek(struct Qdisc *sch)
        return NULL;
 }
 
-static inline void
-teql_neigh_release(struct neighbour *n)
-{
-       if (n)
-               neigh_release(n);
-}
-
 static void
 teql_reset(struct Qdisc *sch)
 {
@@ -249,8 +242,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
                char haddr[MAX_ADDR_LEN];
 
                neigh_ha_snapshot(haddr, n, dev);
-               err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
-                                     NULL, skb->len);
+               err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
+                                     haddr, NULL, skb->len);
 
                if (err < 0)
                        err = -EINVAL;
index f791edd64d6c0c76dea92888c44530cd1af21a23..197c3f59ecbf1d7975a987e57c13023ac9e2b357 100644 (file)
@@ -391,8 +391,7 @@ void sctp_association_free(struct sctp_association *asoc)
        sctp_asconf_queue_teardown(asoc);
 
        /* Free pending address space being deleted */
-       if (asoc->asconf_addr_del_pending != NULL)
-               kfree(asoc->asconf_addr_del_pending);
+       kfree(asoc->asconf_addr_del_pending);
 
        /* AUTH - Free the endpoint shared keys */
        sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
@@ -1182,7 +1181,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
        asoc->peer.peer_hmacs = new->peer.peer_hmacs;
        new->peer.peer_hmacs = NULL;
 
-       sctp_auth_key_put(asoc->asoc_shared_key);
        sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
 }
 
index e49e231cef529ecaf4bf2bc3e6a168b8f8b6fc06..06320c8c1c8660cdd4e1a56968353169236fb2df 100644 (file)
@@ -2608,7 +2608,7 @@ do_addr_param:
 
                addr_param = param.v + sizeof(sctp_addip_param_t);
 
-               af = sctp_get_af_specific(param_type2af(param.p->type));
+               af = sctp_get_af_specific(param_type2af(addr_param->p.type));
                if (af == NULL)
                        break;
 
index 2625eccb77d5d7738f9e50930ab37a6db6a80760..aafe94bf292e73ecb765a31ae3456c3c11fe932f 100644 (file)
@@ -1603,7 +1603,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        sctp_assoc_t associd = 0;
        sctp_cmsgs_t cmsgs = { NULL };
        sctp_scope_t scope;
-       bool fill_sinfo_ttl = false;
+       bool fill_sinfo_ttl = false, wait_connect = false;
        struct sctp_datamsg *datamsg;
        int msg_flags = msg->msg_flags;
        __u16 sinfo_flags = 0;
@@ -1943,6 +1943,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                if (err < 0)
                        goto out_free;
 
+               wait_connect = true;
                pr_debug("%s: we associated primitively\n", __func__);
        }
 
@@ -1980,6 +1981,11 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        sctp_datamsg_put(datamsg);
        err = msg_len;
 
+       if (unlikely(wait_connect)) {
+               timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
+               sctp_wait_for_connect(asoc, &timeo);
+       }
+
        /* If we are already past ASSOCIATE, the lower
         * layers are responsible for association cleanup.
         */
index a2c33a4dc7bab45e520556557924b62d24951d85..bbedbfcb42c2505fceb57fa058f262d90e1670ed 100644 (file)
@@ -113,10 +113,8 @@ unsigned int sysctl_net_busy_read __read_mostly;
 unsigned int sysctl_net_busy_poll __read_mostly;
 #endif
 
-static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                        unsigned long nr_segs, loff_t pos);
-static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                         unsigned long nr_segs, loff_t pos);
+static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
+static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
@@ -142,8 +140,10 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 static const struct file_operations socket_file_ops = {
        .owner =        THIS_MODULE,
        .llseek =       no_llseek,
-       .aio_read =     sock_aio_read,
-       .aio_write =    sock_aio_write,
+       .read =         new_sync_read,
+       .write =        new_sync_write,
+       .read_iter =    sock_read_iter,
+       .write_iter =   sock_write_iter,
        .poll =         sock_poll,
        .unlocked_ioctl = sock_ioctl,
 #ifdef CONFIG_COMPAT
@@ -613,13 +613,6 @@ EXPORT_SYMBOL(__sock_tx_timestamp);
 static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
                                       struct msghdr *msg, size_t size)
 {
-       struct sock_iocb *si = kiocb_to_siocb(iocb);
-
-       si->sock = sock;
-       si->scm = NULL;
-       si->msg = msg;
-       si->size = size;
-
        return sock->ops->sendmsg(iocb, sock, msg, size);
 }
 
@@ -635,11 +628,9 @@ static int do_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                           size_t size, bool nosec)
 {
        struct kiocb iocb;
-       struct sock_iocb siocb;
        int ret;
 
        init_sync_kiocb(&iocb, NULL);
-       iocb.private = &siocb;
        ret = nosec ? __sock_sendmsg_nosec(&iocb, sock, msg, size) :
                      __sock_sendmsg(&iocb, sock, msg, size);
        if (-EIOCBQUEUED == ret)
@@ -756,14 +747,6 @@ EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
 static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
                                       struct msghdr *msg, size_t size, int flags)
 {
-       struct sock_iocb *si = kiocb_to_siocb(iocb);
-
-       si->sock = sock;
-       si->scm = NULL;
-       si->msg = msg;
-       si->size = size;
-       si->flags = flags;
-
        return sock->ops->recvmsg(iocb, sock, msg, size, flags);
 }
 
@@ -779,11 +762,9 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg,
                 size_t size, int flags)
 {
        struct kiocb iocb;
-       struct sock_iocb siocb;
        int ret;
 
        init_sync_kiocb(&iocb, NULL);
-       iocb.private = &siocb;
        ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&iocb);
@@ -795,11 +776,9 @@ static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
                              size_t size, int flags)
 {
        struct kiocb iocb;
-       struct sock_iocb siocb;
        int ret;
 
        init_sync_kiocb(&iocb, NULL);
-       iocb.private = &siocb;
        ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&iocb);
@@ -866,92 +845,47 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
        return sock->ops->splice_read(sock, ppos, pipe, len, flags);
 }
 
-static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
-                                        struct sock_iocb *siocb)
-{
-       if (!is_sync_kiocb(iocb))
-               BUG();
-
-       siocb->kiocb = iocb;
-       iocb->private = siocb;
-       return siocb;
-}
-
-static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
-               struct file *file, const struct iovec *iov,
-               unsigned long nr_segs)
+static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+       struct file *file = iocb->ki_filp;
        struct socket *sock = file->private_data;
-       size_t size = 0;
-       int i;
-
-       for (i = 0; i < nr_segs; i++)
-               size += iov[i].iov_len;
+       struct msghdr msg = {.msg_iter = *to};
+       ssize_t res;
 
-       msg->msg_name = NULL;
-       msg->msg_namelen = 0;
-       msg->msg_control = NULL;
-       msg->msg_controllen = 0;
-       iov_iter_init(&msg->msg_iter, READ, iov, nr_segs, size);
-       msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+       if (file->f_flags & O_NONBLOCK)
+               msg.msg_flags = MSG_DONTWAIT;
 
-       return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
-}
-
-static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos)
-{
-       struct sock_iocb siocb, *x;
-
-       if (pos != 0)
+       if (iocb->ki_pos != 0)
                return -ESPIPE;
 
        if (iocb->ki_nbytes == 0)       /* Match SYS5 behaviour */
                return 0;
 
-
-       x = alloc_sock_iocb(iocb, &siocb);
-       if (!x)
-               return -ENOMEM;
-       return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+       res = __sock_recvmsg(iocb, sock, &msg,
+                            iocb->ki_nbytes, msg.msg_flags);
+       *to = msg.msg_iter;
+       return res;
 }
 
-static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
-                       struct file *file, const struct iovec *iov,
-                       unsigned long nr_segs)
+static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
        struct socket *sock = file->private_data;
-       size_t size = 0;
-       int i;
-
-       for (i = 0; i < nr_segs; i++)
-               size += iov[i].iov_len;
-
-       msg->msg_name = NULL;
-       msg->msg_namelen = 0;
-       msg->msg_control = NULL;
-       msg->msg_controllen = 0;
-       iov_iter_init(&msg->msg_iter, WRITE, iov, nr_segs, size);
-       msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
-       if (sock->type == SOCK_SEQPACKET)
-               msg->msg_flags |= MSG_EOR;
-
-       return __sock_sendmsg(iocb, sock, msg, size);
-}
+       struct msghdr msg = {.msg_iter = *from};
+       ssize_t res;
 
-static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                         unsigned long nr_segs, loff_t pos)
-{
-       struct sock_iocb siocb, *x;
-
-       if (pos != 0)
+       if (iocb->ki_pos != 0)
                return -ESPIPE;
 
-       x = alloc_sock_iocb(iocb, &siocb);
-       if (!x)
-               return -ENOMEM;
+       if (file->f_flags & O_NONBLOCK)
+               msg.msg_flags = MSG_DONTWAIT;
+
+       if (sock->type == SOCK_SEQPACKET)
+               msg.msg_flags |= MSG_EOR;
 
-       return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+       res = __sock_sendmsg(iocb, sock, &msg, iocb->ki_nbytes);
+       *from = msg.msg_iter;
+       return res;
 }
 
 /*
index 1cb61242e55e47e66d5e50e9ad1b0fafd127ac40..4439ac4c1b53fcaf12a8a06723b4a51330dddeae 100644 (file)
@@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
        struct kvec *head = buf->head;
        struct kvec *tail = buf->tail;
        int fraglen;
-       int new, old;
+       int new;
 
        if (len > buf->len) {
                WARN_ON_ONCE(1);
@@ -629,8 +629,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
        buf->len -= fraglen;
 
        new = buf->page_base + buf->page_len;
-       old = new + fraglen;
-       xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT);
+
+       xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
 
        if (buf->page_len) {
                xdr->p = page_address(*xdr->page_ptr);
index d162b21b14bd23b2f65a7dac64ed2cc1a7e14c3e..8c1e558db11893b7f70eaa5e5201da07a3df8812 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
 #include <linux/netdevice.h>
 #include <net/switchdev.h>
 
@@ -50,3 +52,176 @@ int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
        return ops->ndo_switch_port_stp_update(dev, state);
 }
 EXPORT_SYMBOL(netdev_switch_port_stp_update);
+
+static DEFINE_MUTEX(netdev_switch_mutex);
+static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
+
+/**
+ *     register_netdev_switch_notifier - Register nofifier
+ *     @nb: notifier_block
+ *
+ *     Register switch device notifier. This should be used by code
+ *     which needs to monitor events happening in particular device.
+ *     Return values are same as for atomic_notifier_chain_register().
+ */
+int register_netdev_switch_notifier(struct notifier_block *nb)
+{
+       int err;
+
+       mutex_lock(&netdev_switch_mutex);
+       err = raw_notifier_chain_register(&netdev_switch_notif_chain, nb);
+       mutex_unlock(&netdev_switch_mutex);
+       return err;
+}
+EXPORT_SYMBOL(register_netdev_switch_notifier);
+
+/**
+ *     unregister_netdev_switch_notifier - Unregister nofifier
+ *     @nb: notifier_block
+ *
+ *     Unregister switch device notifier.
+ *     Return values are same as for atomic_notifier_chain_unregister().
+ */
+int unregister_netdev_switch_notifier(struct notifier_block *nb)
+{
+       int err;
+
+       mutex_lock(&netdev_switch_mutex);
+       err = raw_notifier_chain_unregister(&netdev_switch_notif_chain, nb);
+       mutex_unlock(&netdev_switch_mutex);
+       return err;
+}
+EXPORT_SYMBOL(unregister_netdev_switch_notifier);
+
+/**
+ *     call_netdev_switch_notifiers - Call nofifiers
+ *     @val: value passed unmodified to notifier function
+ *     @dev: port device
+ *     @info: notifier information data
+ *
+ *     Call all network notifier blocks. This should be called by driver
+ *     when it needs to propagate hardware event.
+ *     Return values are same as for atomic_notifier_call_chain().
+ */
+int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
+                                struct netdev_switch_notifier_info *info)
+{
+       int err;
+
+       info->dev = dev;
+       mutex_lock(&netdev_switch_mutex);
+       err = raw_notifier_call_chain(&netdev_switch_notif_chain, val, info);
+       mutex_unlock(&netdev_switch_mutex);
+       return err;
+}
+EXPORT_SYMBOL(call_netdev_switch_notifiers);
+
+/**
+ *     netdev_switch_port_bridge_setlink - Notify switch device port of bridge
+ *     port attributes
+ *
+ *     @dev: port device
+ *     @nlh: netlink msg with bridge port attributes
+ *     @flags: bridge setlink flags
+ *
+ *     Notify switch device port of bridge port attributes
+ */
+int netdev_switch_port_bridge_setlink(struct net_device *dev,
+                                     struct nlmsghdr *nlh, u16 flags)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
+               return 0;
+
+       if (!ops->ndo_bridge_setlink)
+               return -EOPNOTSUPP;
+
+       return ops->ndo_bridge_setlink(dev, nlh, flags);
+}
+EXPORT_SYMBOL(netdev_switch_port_bridge_setlink);
+
+/**
+ *     netdev_switch_port_bridge_dellink - Notify switch device port of bridge
+ *     port attribute delete
+ *
+ *     @dev: port device
+ *     @nlh: netlink msg with bridge port attributes
+ *     @flags: bridge setlink flags
+ *
+ *     Notify switch device port of bridge port attribute delete
+ */
+int netdev_switch_port_bridge_dellink(struct net_device *dev,
+                                     struct nlmsghdr *nlh, u16 flags)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
+               return 0;
+
+       if (!ops->ndo_bridge_dellink)
+               return -EOPNOTSUPP;
+
+       return ops->ndo_bridge_dellink(dev, nlh, flags);
+}
+EXPORT_SYMBOL(netdev_switch_port_bridge_dellink);
+
+/**
+ *     ndo_dflt_netdev_switch_port_bridge_setlink - default ndo bridge setlink
+ *                                                  op for master devices
+ *
+ *     @dev: port device
+ *     @nlh: netlink msg with bridge port attributes
+ *     @flags: bridge setlink flags
+ *
+ *     Notify master device slaves of bridge port attributes
+ */
+int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
+                                              struct nlmsghdr *nlh, u16 flags)
+{
+       struct net_device *lower_dev;
+       struct list_head *iter;
+       int ret = 0, err = 0;
+
+       if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
+               return ret;
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               err = netdev_switch_port_bridge_setlink(lower_dev, nlh, flags);
+               if (err && err != -EOPNOTSUPP)
+                       ret = err;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_setlink);
+
+/**
+ *     ndo_dflt_netdev_switch_port_bridge_dellink - default ndo bridge dellink
+ *                                                  op for master devices
+ *
+ *     @dev: port device
+ *     @nlh: netlink msg with bridge port attributes
+ *     @flags: bridge dellink flags
+ *
+ *     Notify master device slaves of bridge port attribute deletes
+ */
+int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
+                                              struct nlmsghdr *nlh, u16 flags)
+{
+       struct net_device *lower_dev;
+       struct list_head *iter;
+       int ret = 0, err = 0;
+
+       if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
+               return ret;
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               err = netdev_switch_port_bridge_dellink(lower_dev, nlh, flags);
+               if (err && err != -EOPNOTSUPP)
+                       ret = err;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(ndo_dflt_netdev_switch_port_bridge_dellink);
index c890848f9d560fc4cda305653c08f53c712dfbe2..91c8a8e031db718a067fa2ed4ef9f2924ddb7c25 100644 (file)
@@ -20,18 +20,6 @@ menuconfig TIPC
 
          If in doubt, say N.
 
-config TIPC_PORTS
-       int "Maximum number of ports in a node"
-       depends on TIPC
-       range 127 65535
-       default "8191"
-       help
-         Specifies how many ports can be supported by a node.
-         Can range from 127 to 65535 ports; default is 8191.
-
-         Setting this to a smaller value saves some memory,
-         setting it to higher allows for more ports.
-
 config TIPC_MEDIA_IB
        bool "InfiniBand media type support"
        depends on TIPC && INFINIBAND_IPOIB
index 357b74b26f9e9d513d18797fd77834e00be1e12e..48fd3b5a73fbaf934178c444cbba07aa1a0f5b8c 100644 (file)
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "core.h"
+#include <linux/kernel.h>
 #include "addr.h"
+#include "core.h"
+
+/**
+ * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
+ */
+int in_own_cluster(struct net *net, u32 addr)
+{
+       return in_own_cluster_exact(net, addr) || !addr;
+}
+
+int in_own_cluster_exact(struct net *net, u32 addr)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return !((addr ^ tn->own_addr) >> 12);
+}
+
+/**
+ * in_own_node - test for node inclusion; <0.0.0> always matches
+ */
+int in_own_node(struct net *net, u32 addr)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return (addr == tn->own_addr) || !addr;
+}
+
+/**
+ * addr_domain - convert 2-bit scope value to equivalent message lookup domain
+ *
+ * Needed when address of a named message must be looked up a second time
+ * after a network hop.
+ */
+u32 addr_domain(struct net *net, u32 sc)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       if (likely(sc == TIPC_NODE_SCOPE))
+               return tn->own_addr;
+       if (sc == TIPC_CLUSTER_SCOPE)
+               return tipc_cluster_mask(tn->own_addr);
+       return tipc_zone_mask(tn->own_addr);
+}
 
 /**
  * tipc_addr_domain_valid - validates a network domain address
index a74acf9ee804b43496d2c53c5ebb26160378eaa6..c700c2d28e09eb6921bb2a211f1cba50b8d08b82 100644 (file)
 #ifndef _TIPC_ADDR_H
 #define _TIPC_ADDR_H
 
-#include "core.h"
+#include <linux/types.h>
+#include <linux/tipc.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #define TIPC_ZONE_MASK         0xff000000u
 #define TIPC_CLUSTER_MASK      0xfffff000u
@@ -52,42 +55,10 @@ static inline u32 tipc_cluster_mask(u32 addr)
        return addr & TIPC_CLUSTER_MASK;
 }
 
-static inline int in_own_cluster_exact(u32 addr)
-{
-       return !((addr ^ tipc_own_addr) >> 12);
-}
-
-/**
- * in_own_node - test for node inclusion; <0.0.0> always matches
- */
-static inline int in_own_node(u32 addr)
-{
-       return (addr == tipc_own_addr) || !addr;
-}
-
-/**
- * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
- */
-static inline int in_own_cluster(u32 addr)
-{
-       return in_own_cluster_exact(addr) || !addr;
-}
-
-/**
- * addr_domain - convert 2-bit scope value to equivalent message lookup domain
- *
- * Needed when address of a named message must be looked up a second time
- * after a network hop.
- */
-static inline u32 addr_domain(u32 sc)
-{
-       if (likely(sc == TIPC_NODE_SCOPE))
-               return tipc_own_addr;
-       if (sc == TIPC_CLUSTER_SCOPE)
-               return tipc_cluster_mask(tipc_own_addr);
-       return tipc_zone_mask(tipc_own_addr);
-}
-
+int in_own_cluster(struct net *net, u32 addr);
+int in_own_cluster_exact(struct net *net, u32 addr);
+int in_own_node(struct net *net, u32 addr);
+u32 addr_domain(struct net *net, u32 sc);
 int tipc_addr_domain_valid(u32);
 int tipc_addr_node_valid(u32 addr);
 int tipc_in_scope(u32 domain, u32 addr);
index 96ceefeb9daf4eb780cd168b4d008418c0055dd9..81b1fef1f5e014882b3bb07569a532583c82b14d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bcast.c: TIPC broadcast code
  *
- * Copyright (c) 2004-2006, 2014, Ericsson AB
+ * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
  * Copyright (c) 2004, Intel Corporation.
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "core.h"
-#include "link.h"
 #include "socket.h"
 #include "msg.h"
 #include "bcast.h"
 #include "name_distr.h"
+#include "core.h"
 
 #define        MAX_PKT_DEFAULT_MCAST   1500    /* bcast link max packet size (fixed) */
 #define        BCLINK_WIN_DEFAULT      20      /* bcast link window size (default) */
-#define        BCBEARER                MAX_BEARERS
-
-/**
- * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
- * @primary: pointer to primary bearer
- * @secondary: pointer to secondary bearer
- *
- * Bearers must have same priority and same set of reachable destinations
- * to be paired.
- */
-
-struct tipc_bcbearer_pair {
-       struct tipc_bearer *primary;
-       struct tipc_bearer *secondary;
-};
-
-/**
- * struct tipc_bcbearer - bearer used by broadcast link
- * @bearer: (non-standard) broadcast bearer structure
- * @media: (non-standard) broadcast media structure
- * @bpairs: array of bearer pairs
- * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
- * @remains: temporary node map used by tipc_bcbearer_send()
- * @remains_new: temporary node map used tipc_bcbearer_send()
- *
- * Note: The fields labelled "temporary" are incorporated into the bearer
- * to avoid consuming potentially limited stack space through the use of
- * large local variables within multicast routines.  Concurrent access is
- * prevented through use of the spinlock "bclink_lock".
- */
-struct tipc_bcbearer {
-       struct tipc_bearer bearer;
-       struct tipc_media media;
-       struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
-       struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
-       struct tipc_node_map remains;
-       struct tipc_node_map remains_new;
-};
-
-/**
- * struct tipc_bclink - link used for broadcast messages
- * @lock: spinlock governing access to structure
- * @link: (non-standard) broadcast link structure
- * @node: (non-standard) node structure representing b'cast link's peer node
- * @flags: represent bclink states
- * @bcast_nodes: map of broadcast-capable nodes
- * @retransmit_to: node that most recently requested a retransmit
- *
- * Handles sequence numbering, fragmentation, bundling, etc.
- */
-struct tipc_bclink {
-       spinlock_t lock;
-       struct tipc_link link;
-       struct tipc_node node;
-       unsigned int flags;
-       struct tipc_node_map bcast_nodes;
-       struct tipc_node *retransmit_to;
-};
-
-static struct tipc_bcbearer *bcbearer;
-static struct tipc_bclink *bclink;
-static struct tipc_link *bcl;
 
 const char tipc_bclink_name[] = "broadcast-link";
 
@@ -115,38 +52,50 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
 static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
 static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
 
-static void tipc_bclink_lock(void)
+static void tipc_bclink_lock(struct net *net)
 {
-       spin_lock_bh(&bclink->lock);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       spin_lock_bh(&tn->bclink->lock);
 }
 
-static void tipc_bclink_unlock(void)
+static void tipc_bclink_unlock(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node = NULL;
 
-       if (likely(!bclink->flags)) {
-               spin_unlock_bh(&bclink->lock);
+       if (likely(!tn->bclink->flags)) {
+               spin_unlock_bh(&tn->bclink->lock);
                return;
        }
 
-       if (bclink->flags & TIPC_BCLINK_RESET) {
-               bclink->flags &= ~TIPC_BCLINK_RESET;
-               node = tipc_bclink_retransmit_to();
+       if (tn->bclink->flags & TIPC_BCLINK_RESET) {
+               tn->bclink->flags &= ~TIPC_BCLINK_RESET;
+               node = tipc_bclink_retransmit_to(net);
        }
-       spin_unlock_bh(&bclink->lock);
+       spin_unlock_bh(&tn->bclink->lock);
 
        if (node)
                tipc_link_reset_all(node);
 }
 
+void tipc_bclink_input(struct net *net)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
+}
+
 uint  tipc_bclink_get_mtu(void)
 {
        return MAX_PKT_DEFAULT_MCAST;
 }
 
-void tipc_bclink_set_flags(unsigned int flags)
+void tipc_bclink_set_flags(struct net *net, unsigned int flags)
 {
-       bclink->flags |= flags;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tn->bclink->flags |= flags;
 }
 
 static u32 bcbuf_acks(struct sk_buff *buf)
@@ -164,31 +113,40 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
 }
 
-void tipc_bclink_add_node(u32 addr)
+void tipc_bclink_add_node(struct net *net, u32 addr)
 {
-       tipc_bclink_lock();
-       tipc_nmap_add(&bclink->bcast_nodes, addr);
-       tipc_bclink_unlock();
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tipc_bclink_lock(net);
+       tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
+       tipc_bclink_unlock(net);
 }
 
-void tipc_bclink_remove_node(u32 addr)
+void tipc_bclink_remove_node(struct net *net, u32 addr)
 {
-       tipc_bclink_lock();
-       tipc_nmap_remove(&bclink->bcast_nodes, addr);
-       tipc_bclink_unlock();
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tipc_bclink_lock(net);
+       tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
+       tipc_bclink_unlock(net);
 }
 
-static void bclink_set_last_sent(void)
+static void bclink_set_last_sent(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+
        if (bcl->next_out)
                bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
        else
                bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
 }
 
-u32 tipc_bclink_get_last_sent(void)
+u32 tipc_bclink_get_last_sent(struct net *net)
 {
-       return bcl->fsm_msg_cnt;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return tn->bcl->fsm_msg_cnt;
 }
 
 static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -203,9 +161,11 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
  *
  * Called with bclink_lock locked
  */
-struct tipc_node *tipc_bclink_retransmit_to(void)
+struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
 {
-       return bclink->retransmit_to;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return tn->bclink->retransmit_to;
 }
 
 /**
@@ -215,15 +175,17 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
  *
  * Called with bclink_lock locked
  */
-static void bclink_retransmit_pkt(u32 after, u32 to)
+static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
 {
        struct sk_buff *skb;
+       struct tipc_link *bcl = tn->bcl;
 
        skb_queue_walk(&bcl->outqueue, skb) {
-               if (more(buf_seqno(skb), after))
+               if (more(buf_seqno(skb), after)) {
+                       tipc_link_retransmit(bcl, skb, mod(to - after));
                        break;
+               }
        }
-       tipc_link_retransmit(bcl, skb, mod(to - after));
 }
 
 /**
@@ -231,13 +193,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
  *
  * Called with no locks taken
  */
-void tipc_bclink_wakeup_users(void)
+void tipc_bclink_wakeup_users(struct net *net)
 {
-       struct sk_buff *skb;
-
-       while ((skb = skb_dequeue(&bclink->link.waiting_sks)))
-               tipc_sk_rcv(skb);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
+       tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
 }
 
 /**
@@ -252,10 +212,12 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        struct sk_buff *skb, *tmp;
        struct sk_buff *next;
        unsigned int released = 0;
+       struct net *net = n_ptr->net;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
        /* Bail out if tx queue is empty (no clean up is required) */
-       skb = skb_peek(&bcl->outqueue);
+       skb = skb_peek(&tn->bcl->outqueue);
        if (!skb)
                goto exit;
 
@@ -266,43 +228,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
                 * acknowledge sent messages only (if other nodes still exist)
                 * or both sent and unsent messages (otherwise)
                 */
-               if (bclink->bcast_nodes.count)
-                       acked = bcl->fsm_msg_cnt;
+               if (tn->bclink->bcast_nodes.count)
+                       acked = tn->bcl->fsm_msg_cnt;
                else
-                       acked = bcl->next_out_no;
+                       acked = tn->bcl->next_out_no;
        } else {
                /*
                 * Bail out if specified sequence number does not correspond
                 * to a message that has been sent and not yet acknowledged
                 */
                if (less(acked, buf_seqno(skb)) ||
-                   less(bcl->fsm_msg_cnt, acked) ||
+                   less(tn->bcl->fsm_msg_cnt, acked) ||
                    less_eq(acked, n_ptr->bclink.acked))
                        goto exit;
        }
 
        /* Skip over packets that node has previously acknowledged */
-       skb_queue_walk(&bcl->outqueue, skb) {
+       skb_queue_walk(&tn->bcl->outqueue, skb) {
                if (more(buf_seqno(skb), n_ptr->bclink.acked))
                        break;
        }
 
        /* Update packets that node is now acknowledging */
-       skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
+       skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
 
-               next = tipc_skb_queue_next(&bcl->outqueue, skb);
-               if (skb != bcl->next_out) {
+               next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
+               if (skb != tn->bcl->next_out) {
                        bcbuf_decr_acks(skb);
                } else {
                        bcbuf_set_acks(skb, 0);
-                       bcl->next_out = next;
-                       bclink_set_last_sent();
+                       tn->bcl->next_out = next;
+                       bclink_set_last_sent(net);
                }
 
                if (bcbuf_acks(skb) == 0) {
-                       __skb_unlink(skb, &bcl->outqueue);
+                       __skb_unlink(skb, &tn->bcl->outqueue);
                        kfree_skb(skb);
                        released = 1;
                }
@@ -310,15 +272,14 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        n_ptr->bclink.acked = acked;
 
        /* Try resolving broadcast link congestion, if necessary */
-       if (unlikely(bcl->next_out)) {
-               tipc_link_push_packets(bcl);
-               bclink_set_last_sent();
+       if (unlikely(tn->bcl->next_out)) {
+               tipc_link_push_packets(tn->bcl);
+               bclink_set_last_sent(net);
        }
-       if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
+       if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
                n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
-
 exit:
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
 }
 
 /**
@@ -326,9 +287,12 @@ exit:
  *
  * RCU and node lock set
  */
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
+                                  u32 last_sent)
 {
        struct sk_buff *buf;
+       struct net *net = n_ptr->net;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        /* Ignore "stale" link state info */
        if (less_eq(last_sent, n_ptr->bclink.last_in))
@@ -358,18 +322,18 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
                struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
                u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
 
-               tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+               tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
                              INT_H_SIZE, n_ptr->addr);
                msg_set_non_seq(msg, 1);
-               msg_set_mc_netid(msg, tipc_net_id);
+               msg_set_mc_netid(msg, tn->net_id);
                msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_to(msg, to);
 
-               tipc_bclink_lock();
-               tipc_bearer_send(MAX_BEARERS, buf, NULL);
-               bcl->stats.sent_nacks++;
-               tipc_bclink_unlock();
+               tipc_bclink_lock(net);
+               tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
+               tn->bcl->stats.sent_nacks++;
+               tipc_bclink_unlock(net);
                kfree_skb(buf);
 
                n_ptr->bclink.oos_state++;
@@ -382,9 +346,9 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
  * Delay any upcoming NACK by this node if another node has already
  * requested the first message this node is going to ask for.
  */
-static void bclink_peek_nack(struct tipc_msg *msg)
+static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
 {
-       struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
+       struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
 
        if (unlikely(!n_ptr))
                return;
@@ -399,17 +363,23 @@ static void bclink_peek_nack(struct tipc_msg *msg)
        tipc_node_unlock(n_ptr);
 }
 
-/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
+/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
  *                    and to identified node local sockets
+ * @net: the applicable net namespace
  * @list: chain of buffers containing message
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_bclink_xmit(struct sk_buff_head *list)
+int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+       struct tipc_bclink *bclink = tn->bclink;
        int rc = 0;
        int bc = 0;
        struct sk_buff *skb;
+       struct sk_buff_head arrvq;
+       struct sk_buff_head inputq;
 
        /* Prepare clone of message for local node */
        skb = tipc_msg_reassemble(list);
@@ -418,32 +388,35 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
                return -EHOSTUNREACH;
        }
 
-       /* Broadcast to all other nodes */
+       /* Broadcast to all nodes */
        if (likely(bclink)) {
-               tipc_bclink_lock();
+               tipc_bclink_lock(net);
                if (likely(bclink->bcast_nodes.count)) {
-                       rc = __tipc_link_xmit(bcl, list);
+                       rc = __tipc_link_xmit(net, bcl, list);
                        if (likely(!rc)) {
                                u32 len = skb_queue_len(&bcl->outqueue);
 
-                               bclink_set_last_sent();
+                               bclink_set_last_sent(net);
                                bcl->stats.queue_sz_counts++;
                                bcl->stats.accu_queue_sz += len;
                        }
                        bc = 1;
                }
-               tipc_bclink_unlock();
+               tipc_bclink_unlock(net);
        }
 
        if (unlikely(!bc))
                __skb_queue_purge(list);
 
-       /* Deliver message clone */
-       if (likely(!rc))
-               tipc_sk_mcast_rcv(skb);
-       else
+       if (unlikely(rc)) {
                kfree_skb(skb);
-
+               return rc;
+       }
+       /* Deliver message clone */
+       __skb_queue_head_init(&arrvq);
+       skb_queue_head_init(&inputq);
+       __skb_queue_tail(&arrvq, skb);
+       tipc_sk_mcast_rcv(net, &arrvq, &inputq);
        return rc;
 }
 
@@ -454,19 +427,21 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
  */
 static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 {
+       struct tipc_net *tn = net_generic(node->net, tipc_net_id);
+
        bclink_update_last_sent(node, seqno);
        node->bclink.last_in = seqno;
        node->bclink.oos_state = 0;
-       bcl->stats.recv_info++;
+       tn->bcl->stats.recv_info++;
 
        /*
         * Unicast an ACK periodically, ensuring that
         * all nodes in the cluster don't ACK at the same time
         */
-       if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
+       if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
                tipc_link_proto_xmit(node->active_links[node->addr & 1],
                                     STATE_MSG, 0, 0, 0, 0, 0);
-               bcl->stats.sent_acks++;
+               tn->bcl->stats.sent_acks++;
        }
 }
 
@@ -475,19 +450,24 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
  *
  * RCU is locked, no other locks set
  */
-void tipc_bclink_rcv(struct sk_buff *buf)
+void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node;
        u32 next_in;
        u32 seqno;
        int deferred = 0;
+       int pos = 0;
+       struct sk_buff *iskb;
+       struct sk_buff_head *arrvq, *inputq;
 
        /* Screen out unwanted broadcast messages */
-       if (msg_mc_netid(msg) != tipc_net_id)
+       if (msg_mc_netid(msg) != tn->net_id)
                goto exit;
 
-       node = tipc_node_find(msg_prevnode(msg));
+       node = tipc_node_find(net, msg_prevnode(msg));
        if (unlikely(!node))
                goto exit;
 
@@ -499,18 +479,18 @@ void tipc_bclink_rcv(struct sk_buff *buf)
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                if (msg_type(msg) != STATE_MSG)
                        goto unlock;
-               if (msg_destnode(msg) == tipc_own_addr) {
+               if (msg_destnode(msg) == tn->own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bcl->stats.recv_nacks++;
-                       bclink->retransmit_to = node;
-                       bclink_retransmit_pkt(msg_bcgap_after(msg),
+                       tn->bclink->retransmit_to = node;
+                       bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                } else {
                        tipc_node_unlock(node);
-                       bclink_peek_nack(msg);
+                       bclink_peek_nack(net, msg);
                }
                goto exit;
        }
@@ -518,52 +498,54 @@ void tipc_bclink_rcv(struct sk_buff *buf)
        /* Handle in-sequence broadcast message */
        seqno = msg_seqno(msg);
        next_in = mod(node->bclink.last_in + 1);
+       arrvq = &tn->bclink->arrvq;
+       inputq = &tn->bclink->inputq;
 
        if (likely(seqno == next_in)) {
 receive:
                /* Deliver message to destination */
                if (likely(msg_isdata(msg))) {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock();
+                       spin_lock_bh(&inputq->lock);
+                       __skb_queue_tail(arrvq, buf);
+                       spin_unlock_bh(&inputq->lock);
+                       node->action_flags |= TIPC_BCAST_MSG_EVT;
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
-                       if (likely(msg_mcast(msg)))
-                               tipc_sk_mcast_rcv(buf);
-                       else
-                               kfree_skb(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
-                       tipc_bclink_unlock();
+                       pos = 0;
+                       while (tipc_msg_extract(buf, &iskb, &pos)) {
+                               spin_lock_bh(&inputq->lock);
+                               __skb_queue_tail(arrvq, iskb);
+                               spin_unlock_bh(&inputq->lock);
+                       }
+                       node->action_flags |= TIPC_BCAST_MSG_EVT;
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
-                       tipc_link_bundle_rcv(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        tipc_buf_append(&node->bclink.reasm_buf, &buf);
                        if (unlikely(!buf && !node->bclink.reasm_buf))
                                goto unlock;
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_fragments++;
                        if (buf) {
                                bcl->stats.recv_fragmented++;
                                msg = buf_msg(buf);
-                               tipc_bclink_unlock();
+                               tipc_bclink_unlock(net);
                                goto receive;
                        }
-                       tipc_bclink_unlock();
-                       tipc_node_unlock(node);
-               } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
-                       tipc_bclink_lock();
-                       bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
-                       tipc_named_rcv(buf);
                } else {
-                       tipc_bclink_lock();
+                       tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
-                       tipc_bclink_unlock();
+                       tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        kfree_skb(buf);
                }
@@ -601,14 +583,14 @@ receive:
                buf = NULL;
        }
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        if (deferred)
                bcl->stats.deferred_recv++;
        else
                bcl->stats.duplicates++;
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
 
 unlock:
        tipc_node_unlock(node);
@@ -619,7 +601,7 @@ exit:
 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
 {
        return (n_ptr->bclink.recv_permitted &&
-               (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
+               (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
 }
 
 
@@ -632,11 +614,15 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
  * Returns 0 (packet sent successfully) under all circumstances,
  * since the broadcast link's pseudo-bearer never blocks
  */
-static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
+static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
+                             struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
 {
        int bp_index;
        struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bcbearer *bcbearer = tn->bcbearer;
+       struct tipc_bclink *bclink = tn->bclink;
 
        /* Prepare broadcast link message for reliable transmission,
         * if first time trying to send it;
@@ -646,8 +632,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
        if (likely(!msg_non_seq(buf_msg(buf)))) {
                bcbuf_set_acks(buf, bclink->bcast_nodes.count);
                msg_set_non_seq(msg, 1);
-               msg_set_mc_netid(msg, tipc_net_id);
-               bcl->stats.sent_info++;
+               msg_set_mc_netid(msg, tn->net_id);
+               tn->bcl->stats.sent_info++;
 
                if (WARN_ON(!bclink->bcast_nodes.count)) {
                        dump_stack();
@@ -676,13 +662,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 
                if (bp_index == 0) {
                        /* Use original buffer for first bearer */
-                       tipc_bearer_send(b->identity, buf, &b->bcast_addr);
+                       tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
                } else {
                        /* Avoid concurrent buffer access */
                        tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
                        if (!tbuf)
                                break;
-                       tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
+                       tipc_bearer_send(net, b->identity, tbuf,
+                                        &b->bcast_addr);
                        kfree_skb(tbuf); /* Bearer keeps a clone */
                }
                if (bcbearer->remains_new.count == 0)
@@ -697,15 +684,18 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 /**
  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
  */
-void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
+void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
+                       u32 node, bool action)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bcbearer *bcbearer = tn->bcbearer;
        struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct tipc_bcbearer_pair *bp_curr;
        struct tipc_bearer *b;
        int b_index;
        int pri;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        if (action)
                tipc_nmap_add(nm_ptr, node);
@@ -717,7 +707,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
 
        rcu_read_lock();
        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-               b = rcu_dereference_rtnl(bearer_list[b_index]);
+               b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
                if (!b || !b->nodes.count)
                        continue;
 
@@ -752,7 +742,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
                bp_curr++;
        }
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
 }
 
 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
@@ -806,17 +796,19 @@ msg_full:
        return -EMSGSIZE;
 }
 
-int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
 {
        int err;
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *prop;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
 
        if (!bcl)
                return 0;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
                          NLM_F_MULTI, TIPC_NL_LINK_GET);
@@ -851,7 +843,7 @@ int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
        if (err)
                goto attr_msg_full;
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);
 
@@ -862,21 +854,23 @@ prop_msg_full:
 attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
 msg_full:
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        genlmsg_cancel(msg->skb, hdr);
 
        return -EMSGSIZE;
 }
 
-int tipc_bclink_stats(char *buf, const u32 buf_size)
+int tipc_bclink_stats(struct net *net, char *buf, const u32 buf_size)
 {
        int ret;
        struct tipc_stats *s;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
 
        if (!bcl)
                return 0;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
 
        s = &bcl->stats;
 
@@ -905,36 +899,47 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
                             s->queue_sz_counts ?
                             (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        return ret;
 }
 
-int tipc_bclink_reset_stats(void)
+int tipc_bclink_reset_stats(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+
        if (!bcl)
                return -ENOPROTOOPT;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        return 0;
 }
 
-int tipc_bclink_set_queue_limits(u32 limit)
+int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+
        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;
 
-       tipc_bclink_lock();
+       tipc_bclink_lock(net);
        tipc_link_set_queue_limits(bcl, limit);
-       tipc_bclink_unlock();
+       tipc_bclink_unlock(net);
        return 0;
 }
 
-int tipc_bclink_init(void)
+int tipc_bclink_init(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bcbearer *bcbearer;
+       struct tipc_bclink *bclink;
+       struct tipc_link *bcl;
+
        bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
        if (!bcbearer)
                return -ENOMEM;
@@ -953,30 +958,39 @@ int tipc_bclink_init(void)
        spin_lock_init(&bclink->lock);
        __skb_queue_head_init(&bcl->outqueue);
        __skb_queue_head_init(&bcl->deferred_queue);
-       skb_queue_head_init(&bcl->waiting_sks);
+       skb_queue_head_init(&bcl->wakeupq);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
-       __skb_queue_head_init(&bclink->node.waiting_sks);
+       __skb_queue_head_init(&bclink->arrvq);
+       skb_queue_head_init(&bclink->inputq);
        bcl->owner = &bclink->node;
+       bcl->owner->net = net;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->bearer_id = MAX_BEARERS;
-       rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
+       rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
        bcl->state = WORKING_WORKING;
+       bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
+       msg_set_prevnode(bcl->pmsg, tn->own_addr);
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
+       tn->bcbearer = bcbearer;
+       tn->bclink = bclink;
+       tn->bcl = bcl;
        return 0;
 }
 
-void tipc_bclink_stop(void)
+void tipc_bclink_stop(struct net *net)
 {
-       tipc_bclink_lock();
-       tipc_link_purge_queues(bcl);
-       tipc_bclink_unlock();
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       tipc_bclink_lock(net);
+       tipc_link_purge_queues(tn->bcl);
+       tipc_bclink_unlock(net);
 
-       RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
+       RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
        synchronize_net();
-       kfree(bcbearer);
-       kfree(bclink);
+       kfree(tn->bcbearer);
+       kfree(tn->bclink);
 }
 
 /**
@@ -1036,50 +1050,3 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                }
        }
 }
-
-/**
- * tipc_port_list_add - add a port to a port list, ensuring no duplicates
- */
-void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
-{
-       struct tipc_port_list *item = pl_ptr;
-       int i;
-       int item_sz = PLSIZE;
-       int cnt = pl_ptr->count;
-
-       for (; ; cnt -= item_sz, item = item->next) {
-               if (cnt < PLSIZE)
-                       item_sz = cnt;
-               for (i = 0; i < item_sz; i++)
-                       if (item->ports[i] == port)
-                               return;
-               if (i < PLSIZE) {
-                       item->ports[i] = port;
-                       pl_ptr->count++;
-                       return;
-               }
-               if (!item->next) {
-                       item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
-                       if (!item->next) {
-                               pr_warn("Incomplete multicast delivery, no memory\n");
-                               return;
-                       }
-                       item->next->next = NULL;
-               }
-       }
-}
-
-/**
- * tipc_port_list_free - free dynamically created entries in port_list chain
- *
- */
-void tipc_port_list_free(struct tipc_port_list *pl_ptr)
-{
-       struct tipc_port_list *item;
-       struct tipc_port_list *next;
-
-       for (item = pl_ptr->next; item; item = next) {
-               next = item->next;
-               kfree(item);
-       }
-}
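
The bcast.c conversion above is one mechanical pattern applied throughout: each former file-scope global (bcl, bclink, bcbearer, bearer_list) moves into struct tipc_net, and every function grows a struct net * argument from which the per-namespace instance is fetched with net_generic(). A minimal sketch of that pernet pattern, using hypothetical demo_* names rather than TIPC's own code:

    #include <linux/spinlock.h>
    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    static int demo_net_id __read_mostly;   /* slot assigned at registration */

    struct demo_net {
            int counter;                    /* was a file-scope global */
            spinlock_t lock;
    };

    static int __net_init demo_init_net(struct net *net)
    {
            /* the core already allocated .size bytes for this namespace */
            struct demo_net *dn = net_generic(net, demo_net_id);

            dn->counter = 0;
            spin_lock_init(&dn->lock);
            return 0;
    }

    static struct pernet_operations demo_net_ops = {
            .init = demo_init_net,
            .id   = &demo_net_id,
            .size = sizeof(struct demo_net),
    };

    /* module init would call register_pernet_subsys(&demo_net_ops) */
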
index 644d79129fbaeb1ac3022fd02f9a6c4dafca9b7d..a910c0b9f249e85a4433e82c6d822c7939303f3b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bcast.h: Include file for TIPC broadcast code
  *
- * Copyright (c) 2003-2006, 2014, Ericsson AB
+ * Copyright (c) 2003-2006, 2014-2015, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
 #ifndef _TIPC_BCAST_H
 #define _TIPC_BCAST_H
 
-#include "netlink.h"
-
-#define MAX_NODES 4096
-#define WSIZE 32
-#define TIPC_BCLINK_RESET 1
+#include <linux/tipc_config.h>
+#include "link.h"
+#include "node.h"
 
 /**
- * struct tipc_node_map - set of node identifiers
- * @count: # of nodes in set
- * @map: bitmap of node identifiers that are in the set
+ * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
+ * @primary: pointer to primary bearer
+ * @secondary: pointer to secondary bearer
+ *
+ * Bearers must have same priority and same set of reachable destinations
+ * to be paired.
  */
-struct tipc_node_map {
-       u32 count;
-       u32 map[MAX_NODES / WSIZE];
+
+struct tipc_bcbearer_pair {
+       struct tipc_bearer *primary;
+       struct tipc_bearer *secondary;
 };
 
-#define PLSIZE 32
+#define TIPC_BCLINK_RESET      1
+#define        BCBEARER                MAX_BEARERS
 
 /**
- * struct tipc_port_list - set of node local destination ports
- * @count: # of ports in set (only valid for first entry in list)
- * @next: pointer to next entry in list
- * @ports: array of port references
+ * struct tipc_bcbearer - bearer used by broadcast link
+ * @bearer: (non-standard) broadcast bearer structure
+ * @media: (non-standard) broadcast media structure
+ * @bpairs: array of bearer pairs
+ * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
+ * @remains: temporary node map used by tipc_bcbearer_send()
+ * @remains_new: temporary node map used by tipc_bcbearer_send()
+ *
+ * Note: The fields labelled "temporary" are incorporated into the bearer
+ * to avoid consuming potentially limited stack space through the use of
+ * large local variables within multicast routines.  Concurrent access is
+ * prevented through use of the spinlock "bclink_lock".
  */
-struct tipc_port_list {
-       int count;
-       struct tipc_port_list *next;
-       u32 ports[PLSIZE];
+struct tipc_bcbearer {
+       struct tipc_bearer bearer;
+       struct tipc_media media;
+       struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
+       struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+       struct tipc_node_map remains;
+       struct tipc_node_map remains_new;
 };
 
+/**
+ * struct tipc_bclink - link used for broadcast messages
+ * @lock: spinlock governing access to structure
+ * @link: (non-standard) broadcast link structure
+ * @node: (non-standard) node structure representing b'cast link's peer node
+ * @flags: represent bclink states
+ * @arrvq: queue of arriving broadcast messages awaiting delivery
+ * @inputq: queue of broadcast messages ready for delivery upwards
+ * @bcast_nodes: map of broadcast-capable nodes
+ * @retransmit_to: node that most recently requested a retransmit
+ *
+ * Handles sequence numbering, fragmentation, bundling, etc.
+ */
+struct tipc_bclink {
+       spinlock_t lock;
+       struct tipc_link link;
+       struct tipc_node node;
+       unsigned int flags;
+       struct sk_buff_head arrvq;
+       struct sk_buff_head inputq;
+       struct tipc_node_map bcast_nodes;
+       struct tipc_node *retransmit_to;
+};
 
 struct tipc_node;
-
 extern const char tipc_bclink_name[];
 
 /**
@@ -81,27 +115,27 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
        return !memcmp(nm_a, nm_b, sizeof(*nm_a));
 }
 
-void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
-void tipc_port_list_free(struct tipc_port_list *pl_ptr);
-
-int tipc_bclink_init(void);
-void tipc_bclink_stop(void);
-void tipc_bclink_set_flags(unsigned int flags);
-void tipc_bclink_add_node(u32 addr);
-void tipc_bclink_remove_node(u32 addr);
-struct tipc_node *tipc_bclink_retransmit_to(void);
+int tipc_bclink_init(struct net *net);
+void tipc_bclink_stop(struct net *net);
+void tipc_bclink_set_flags(struct net *net, unsigned int flags);
+void tipc_bclink_add_node(struct net *net, u32 addr);
+void tipc_bclink_remove_node(struct net *net, u32 addr);
+struct tipc_node *tipc_bclink_retransmit_to(struct net *net);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-void tipc_bclink_rcv(struct sk_buff *buf);
-u32  tipc_bclink_get_last_sent(void);
+void tipc_bclink_rcv(struct net *net, struct sk_buff *buf);
+u32  tipc_bclink_get_last_sent(struct net *net);
 u32  tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
-int  tipc_bclink_stats(char *stats_buf, const u32 buf_size);
-int  tipc_bclink_reset_stats(void);
-int  tipc_bclink_set_queue_limits(u32 limit);
-void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
+void tipc_bclink_update_link_state(struct tipc_node *node,
+                                  u32 last_sent);
+int  tipc_bclink_stats(struct net *net, char *stats_buf, const u32 buf_size);
+int  tipc_bclink_reset_stats(struct net *net);
+int  tipc_bclink_set_queue_limits(struct net *net, u32 limit);
+void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
+                       u32 node, bool action);
 uint  tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct sk_buff_head *list);
-void tipc_bclink_wakeup_users(void);
-int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
+int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
+void tipc_bclink_wakeup_users(struct net *net);
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
+void tipc_bclink_input(struct net *net);
 
 #endif
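
With these prototypes, a caller that used to read the global bcl must now carry a struct net * and dereference the per-namespace pointer itself. A hedged one-function illustration (demo_bclink_next_seqno() is hypothetical; struct tipc_net, tipc_net_id and tn->bcl are the real hooks):

    static u32 demo_bclink_next_seqno(struct net *net)
    {
            struct tipc_net *tn = net_generic(net, tipc_net_id);

            /* tn->bcl stays NULL until tipc_bclink_init(net) has run */
            return tn->bcl ? tn->bcl->next_out_no : 0;
    }
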
index 463db5b15b8b6ca662447a63b99dcdbe75de0531..33dc3486d16c4014ffaff7c0d49e01089db5ca46 100644 (file)
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <net/sock.h>
 #include "core.h"
 #include "config.h"
 #include "bearer.h"
 #include "link.h"
 #include "discover.h"
+#include "bcast.h"
 
 #define MAX_ADDR_STR 60
 
@@ -67,9 +69,8 @@ static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
        [TIPC_NLA_MEDIA_PROP]           = { .type = NLA_NESTED }
 };
 
-struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
-
-static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
+                          bool shutting_down);
 
 /**
  * tipc_media_find - locates specified media object by name
@@ -190,13 +191,14 @@ static int bearer_name_validate(const char *name,
 /**
  * tipc_bearer_find - locates bearer object with matching bearer name
  */
-struct tipc_bearer *tipc_bearer_find(const char *name)
+struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
        u32 i;
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               b_ptr = rtnl_dereference(bearer_list[i]);
+               b_ptr = rtnl_dereference(tn->bearer_list[i]);
                if (b_ptr && (!strcmp(b_ptr->name, name)))
                        return b_ptr;
        }
@@ -206,8 +208,9 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
 /**
  * tipc_bearer_get_names - record names of bearers in buffer
  */
-struct sk_buff *tipc_bearer_get_names(void)
+struct sk_buff *tipc_bearer_get_names(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *buf;
        struct tipc_bearer *b;
        int i, j;
@@ -218,7 +221,7 @@ struct sk_buff *tipc_bearer_get_names(void)
 
        for (i = 0; media_info_array[i] != NULL; i++) {
                for (j = 0; j < MAX_BEARERS; j++) {
-                       b = rtnl_dereference(bearer_list[j]);
+                       b = rtnl_dereference(tn->bearer_list[j]);
                        if (!b)
                                continue;
                        if (b->media == media_info_array[i]) {
@@ -231,27 +234,29 @@ struct sk_buff *tipc_bearer_get_names(void)
        return buf;
 }
 
-void tipc_bearer_add_dest(u32 bearer_id, u32 dest)
+void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
        if (b_ptr) {
-               tipc_bcbearer_sort(&b_ptr->nodes, dest, true);
+               tipc_bcbearer_sort(net, &b_ptr->nodes, dest, true);
                tipc_disc_add_dest(b_ptr->link_req);
        }
        rcu_read_unlock();
 }
 
-void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
+void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
        if (b_ptr) {
-               tipc_bcbearer_sort(&b_ptr->nodes, dest, false);
+               tipc_bcbearer_sort(net, &b_ptr->nodes, dest, false);
                tipc_disc_remove_dest(b_ptr->link_req);
        }
        rcu_read_unlock();
@@ -260,8 +265,10 @@ void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
 /**
  * tipc_enable_bearer - enable bearer with the given name
  */
-int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
+int tipc_enable_bearer(struct net *net, const char *name, u32 disc_domain,
+                      u32 priority)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
        struct tipc_media *m_ptr;
        struct tipc_bearer_names b_names;
@@ -271,7 +278,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
        u32 i;
        int res = -EINVAL;
 
-       if (!tipc_own_addr) {
+       if (!tn->own_addr) {
                pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
                        name);
                return -ENOPROTOOPT;
@@ -281,11 +288,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
                return -EINVAL;
        }
        if (tipc_addr_domain_valid(disc_domain) &&
-           (disc_domain != tipc_own_addr)) {
-               if (tipc_in_scope(disc_domain, tipc_own_addr)) {
-                       disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+           (disc_domain != tn->own_addr)) {
+               if (tipc_in_scope(disc_domain, tn->own_addr)) {
+                       disc_domain = tn->own_addr & TIPC_CLUSTER_MASK;
                        res = 0;   /* accept any node in own cluster */
-               } else if (in_own_cluster_exact(disc_domain))
+               } else if (in_own_cluster_exact(net, disc_domain))
                        res = 0;   /* accept specified node in own cluster */
        }
        if (res) {
@@ -313,7 +320,7 @@ restart:
        bearer_id = MAX_BEARERS;
        with_this_prio = 1;
        for (i = MAX_BEARERS; i-- != 0; ) {
-               b_ptr = rtnl_dereference(bearer_list[i]);
+               b_ptr = rtnl_dereference(tn->bearer_list[i]);
                if (!b_ptr) {
                        bearer_id = i;
                        continue;
@@ -347,7 +354,7 @@ restart:
 
        strcpy(b_ptr->name, name);
        b_ptr->media = m_ptr;
-       res = m_ptr->enable_media(b_ptr);
+       res = m_ptr->enable_media(net, b_ptr);
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
@@ -361,15 +368,15 @@ restart:
        b_ptr->net_plane = bearer_id + 'A';
        b_ptr->priority = priority;
 
-       res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
+       res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr);
        if (res) {
-               bearer_disable(b_ptr, false);
+               bearer_disable(net, b_ptr, false);
                pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
                        name);
                return -EINVAL;
        }
 
-       rcu_assign_pointer(bearer_list[bearer_id], b_ptr);
+       rcu_assign_pointer(tn->bearer_list[bearer_id], b_ptr);
 
        pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
                name,
@@ -380,11 +387,11 @@ restart:
 /**
  * tipc_reset_bearer - Reset all links established over this bearer
  */
-static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
+static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
 {
        pr_info("Resetting bearer <%s>\n", b_ptr->name);
-       tipc_link_reset_list(b_ptr->identity);
-       tipc_disc_reset(b_ptr);
+       tipc_link_reset_list(net, b_ptr->identity);
+       tipc_disc_reset(net, b_ptr);
        return 0;
 }
 
@@ -393,49 +400,51 @@ static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
  *
  * Note: This routine assumes caller holds RTNL lock.
  */
-static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
+                          bool shutting_down)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 i;
 
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
        b_ptr->media->disable_media(b_ptr);
 
-       tipc_link_delete_list(b_ptr->identity, shutting_down);
+       tipc_link_delete_list(net, b_ptr->identity, shutting_down);
        if (b_ptr->link_req)
                tipc_disc_delete(b_ptr->link_req);
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               if (b_ptr == rtnl_dereference(bearer_list[i])) {
-                       RCU_INIT_POINTER(bearer_list[i], NULL);
+               if (b_ptr == rtnl_dereference(tn->bearer_list[i])) {
+                       RCU_INIT_POINTER(tn->bearer_list[i], NULL);
                        break;
                }
        }
        kfree_rcu(b_ptr, rcu);
 }
 
-int tipc_disable_bearer(const char *name)
+int tipc_disable_bearer(struct net *net, const char *name)
 {
        struct tipc_bearer *b_ptr;
        int res;
 
-       b_ptr = tipc_bearer_find(name);
+       b_ptr = tipc_bearer_find(net, name);
        if (b_ptr == NULL) {
                pr_warn("Attempt to disable unknown bearer <%s>\n", name);
                res = -EINVAL;
        } else {
-               bearer_disable(b_ptr, false);
+               bearer_disable(net, b_ptr, false);
                res = 0;
        }
        return res;
 }
 
-int tipc_enable_l2_media(struct tipc_bearer *b)
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b)
 {
        struct net_device *dev;
        char *driver_name = strchr((const char *)b->name, ':') + 1;
 
        /* Find device with specified name */
-       dev = dev_get_by_name(&init_net, driver_name);
+       dev = dev_get_by_name(net, driver_name);
        if (!dev)
                return -ENODEV;
 
@@ -474,8 +483,8 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
  * @b_ptr: the bearer through which the packet is to be sent
  * @dest: peer destination address
  */
-int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
-                    struct tipc_media_addr *dest)
+int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+                    struct tipc_bearer *b, struct tipc_media_addr *dest)
 {
        struct sk_buff *clone;
        struct net_device *dev;
@@ -511,15 +520,16 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
  * The media send routine must not alter the buffer being passed in
  * as it may be needed for later retransmission!
  */
-void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
+void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
                      struct tipc_media_addr *dest)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
        if (likely(b_ptr))
-               b_ptr->media->send_msg(buf, b_ptr, dest);
+               b_ptr->media->send_msg(net, buf, b_ptr, dest);
        rcu_read_unlock();
 }
 
@@ -539,17 +549,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
 {
        struct tipc_bearer *b_ptr;
 
-       if (!net_eq(dev_net(dev), &init_net)) {
-               kfree_skb(buf);
-               return NET_RX_DROP;
-       }
-
        rcu_read_lock();
        b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
        if (likely(b_ptr)) {
                if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
                        buf->next = NULL;
-                       tipc_rcv(buf, b_ptr);
+                       tipc_rcv(dev_net(dev), buf, b_ptr);
                        rcu_read_unlock();
                        return NET_RX_SUCCESS;
                }
@@ -572,11 +577,9 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
 static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
                                void *ptr)
 {
-       struct tipc_bearer *b_ptr;
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
-       if (!net_eq(dev_net(dev), &init_net))
-               return NOTIFY_DONE;
+       struct net *net = dev_net(dev);
+       struct tipc_bearer *b_ptr;
 
        b_ptr = rtnl_dereference(dev->tipc_ptr);
        if (!b_ptr)
@@ -590,16 +593,16 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
                        break;
        case NETDEV_DOWN:
        case NETDEV_CHANGEMTU:
-               tipc_reset_bearer(b_ptr);
+               tipc_reset_bearer(net, b_ptr);
                break;
        case NETDEV_CHANGEADDR:
                b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
                                       (char *)dev->dev_addr);
-               tipc_reset_bearer(b_ptr);
+               tipc_reset_bearer(net, b_ptr);
                break;
        case NETDEV_UNREGISTER:
        case NETDEV_CHANGENAME:
-               bearer_disable(b_ptr, false);
+               bearer_disable(dev_net(dev), b_ptr, false);
                break;
        }
        return NOTIFY_OK;
@@ -632,16 +635,17 @@ void tipc_bearer_cleanup(void)
        dev_remove_pack(&tipc_packet_type);
 }
 
-void tipc_bearer_stop(void)
+void tipc_bearer_stop(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bearer *b_ptr;
        u32 i;
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               b_ptr = rtnl_dereference(bearer_list[i]);
+               b_ptr = rtnl_dereference(tn->bearer_list[i]);
                if (b_ptr) {
-                       bearer_disable(b_ptr, true);
-                       bearer_list[i] = NULL;
+                       bearer_disable(net, b_ptr, true);
+                       tn->bearer_list[i] = NULL;
                }
        }
 }
@@ -698,6 +702,8 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int i = cb->args[0];
        struct tipc_bearer *bearer;
        struct tipc_nl_msg msg;
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        if (i == MAX_BEARERS)
                return 0;
@@ -708,7 +714,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        rtnl_lock();
        for (i = 0; i < MAX_BEARERS; i++) {
-               bearer = rtnl_dereference(bearer_list[i]);
+               bearer = rtnl_dereference(tn->bearer_list[i]);
                if (!bearer)
                        continue;
 
@@ -730,6 +736,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
        struct tipc_bearer *bearer;
        struct tipc_nl_msg msg;
        struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_BEARER])
                return -EINVAL;
@@ -753,7 +760,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
        msg.seq = info->snd_seq;
 
        rtnl_lock();
-       bearer = tipc_bearer_find(name);
+       bearer = tipc_bearer_find(net, name);
        if (!bearer) {
                err = -EINVAL;
                goto err_out;
@@ -778,6 +785,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
        char *name;
        struct tipc_bearer *bearer;
        struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_BEARER])
                return -EINVAL;
@@ -794,13 +802,13 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
        name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
 
        rtnl_lock();
-       bearer = tipc_bearer_find(name);
+       bearer = tipc_bearer_find(net, name);
        if (!bearer) {
                rtnl_unlock();
                return -EINVAL;
        }
 
-       bearer_disable(bearer, false);
+       bearer_disable(net, bearer, false);
        rtnl_unlock();
 
        return 0;
@@ -808,6 +816,8 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
 
 int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
 {
+       struct net *net = genl_info_net(info);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        int err;
        char *bearer;
        struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
@@ -815,7 +825,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
        u32 prio;
 
        prio = TIPC_MEDIA_LINK_PRI;
-       domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+       domain = tn->own_addr & TIPC_CLUSTER_MASK;
 
        if (!info->attrs[TIPC_NLA_BEARER])
                return -EINVAL;
@@ -847,7 +857,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
        }
 
        rtnl_lock();
-       err = tipc_enable_bearer(bearer, domain, prio);
+       err = tipc_enable_bearer(net, bearer, domain, prio);
        if (err) {
                rtnl_unlock();
                return err;
@@ -863,6 +873,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
        char *name;
        struct tipc_bearer *b;
        struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_BEARER])
                return -EINVAL;
@@ -878,7 +889,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
        name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
 
        rtnl_lock();
-       b = tipc_bearer_find(name);
+       b = tipc_bearer_find(net, name);
        if (!b) {
                rtnl_unlock();
                return -EINVAL;
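
One behavioural consequence worth noting: tipc_l2_rcv_msg() and tipc_l2_device_event() no longer drop traffic that does not belong to init_net; the namespace is derived from the ingress device instead. A condensed sketch of the resulting receive path (demo_l2_rcv() is hypothetical, the calls it makes are the real ones):

    static int demo_l2_rcv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
    {
            struct tipc_bearer *b;

            rcu_read_lock();
            b = rcu_dereference_rtnl(dev->tipc_ptr);
            if (likely(b && skb->pkt_type <= PACKET_BROADCAST)) {
                    skb->next = NULL;
                    tipc_rcv(dev_net(dev), skb, b); /* namespace of the device */
                    rcu_read_unlock();
                    return NET_RX_SUCCESS;
            }
            rcu_read_unlock();
            kfree_skb(skb);
            return NET_RX_DROP;
    }
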
index 2c1230ac5dfe8d66642b30e144d8015ef6a76ed5..c035e3e24764e176f83f09b7f79adbb22c13a629 100644 (file)
 #ifndef _TIPC_BEARER_H
 #define _TIPC_BEARER_H
 
-#include "bcast.h"
 #include "netlink.h"
 #include <net/genetlink.h>
 
 #define MAX_BEARERS    2
 #define MAX_MEDIA      2
+#define MAX_NODES      4096
+#define WSIZE          32
 
 /* Identifiers associated with TIPC message header media address info
  * - address info field is 32 bytes long
 #define TIPC_MEDIA_TYPE_ETH    1
 #define TIPC_MEDIA_TYPE_IB     2
 
+/**
+ * struct tipc_node_map - set of node identifiers
+ * @count: # of nodes in set
+ * @map: bitmap of node identifiers that are in the set
+ */
+struct tipc_node_map {
+       u32 count;
+       u32 map[MAX_NODES / WSIZE];
+};
+
 /**
  * struct tipc_media_addr - destination address used by TIPC bearers
  * @value: address info (format defined by media)
@@ -89,10 +100,10 @@ struct tipc_bearer;
  * @name: media name
  */
 struct tipc_media {
-       int (*send_msg)(struct sk_buff *buf,
+       int (*send_msg)(struct net *net, struct sk_buff *buf,
                        struct tipc_bearer *b_ptr,
                        struct tipc_media_addr *dest);
-       int (*enable_media)(struct tipc_bearer *b_ptr);
+       int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr);
        void (*disable_media)(struct tipc_bearer *b_ptr);
        int (*addr2str)(struct tipc_media_addr *addr,
                        char *strbuf,
@@ -157,17 +168,14 @@ struct tipc_bearer_names {
        char if_name[TIPC_MAX_IF_NAME];
 };
 
-struct tipc_link;
-
-extern struct tipc_bearer __rcu *bearer_list[];
-
 /*
  * TIPC routines available to supported media types
  */
 
-void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr);
-int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
-int tipc_disable_bearer(const char *name);
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr);
+int tipc_enable_bearer(struct net *net, const char *bearer_name,
+                      u32 disc_domain, u32 priority);
+int tipc_disable_bearer(struct net *net, const char *name);
 
 /*
  * Routines made available to TIPC by supported media types
@@ -192,20 +200,20 @@ int tipc_media_set_priority(const char *name, u32 new_value);
 int tipc_media_set_window(const char *name, u32 new_value);
 void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
-int tipc_enable_l2_media(struct tipc_bearer *b);
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b);
 void tipc_disable_l2_media(struct tipc_bearer *b);
-int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
-                    struct tipc_media_addr *dest);
+int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+                    struct tipc_bearer *b, struct tipc_media_addr *dest);
 
-struct sk_buff *tipc_bearer_get_names(void);
-void tipc_bearer_add_dest(u32 bearer_id, u32 dest);
-void tipc_bearer_remove_dest(u32 bearer_id, u32 dest);
-struct tipc_bearer *tipc_bearer_find(const char *name);
+struct sk_buff *tipc_bearer_get_names(struct net *net);
+void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
+void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
+struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
 struct tipc_media *tipc_media_find(const char *name);
 int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
-void tipc_bearer_stop(void);
-void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
+void tipc_bearer_stop(struct net *net);
+void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
                      struct tipc_media_addr *dest);
 
 #endif /* _TIPC_BEARER_H */
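
The widened tipc_media ops mean a media registration now threads the namespace into its callbacks, so dev_get_by_name(net, ...) replaces the hard-coded init_net lookup in tipc_enable_l2_media(). A partial initializer sketching the wiring (field values are illustrative; the three callbacks are the real net-aware ones declared above):

    static struct tipc_media demo_eth_media = {
            .send_msg      = tipc_l2_send_msg,      /* now takes struct net * */
            .enable_media  = tipc_enable_l2_media,  /* looks devices up in "net" */
            .disable_media = tipc_disable_l2_media,
            .type_id       = TIPC_MEDIA_TYPE_ETH,
            .name          = "eth",
    };
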
index 876f4c6a2631b35eba2b9927430b30c34d125c87..6873360cda531799e040c7ecd17ce3c98d156bbf 100644 (file)
@@ -134,7 +134,7 @@ static struct sk_buff *tipc_show_stats(void)
        return buf;
 }
 
-static struct sk_buff *cfg_enable_bearer(void)
+static struct sk_buff *cfg_enable_bearer(struct net *net)
 {
        struct tipc_bearer_config *args;
 
@@ -142,7 +142,7 @@ static struct sk_buff *cfg_enable_bearer(void)
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
-       if (tipc_enable_bearer(args->name,
+       if (tipc_enable_bearer(net, args->name,
                               ntohl(args->disc_domain),
                               ntohl(args->priority)))
                return tipc_cfg_reply_error_string("unable to enable bearer");
@@ -150,78 +150,66 @@ static struct sk_buff *cfg_enable_bearer(void)
        return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_disable_bearer(void)
+static struct sk_buff *cfg_disable_bearer(struct net *net)
 {
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-       if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
+       if (tipc_disable_bearer(net, (char *)TLV_DATA(req_tlv_area)))
                return tipc_cfg_reply_error_string("unable to disable bearer");
 
        return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_set_own_addr(void)
+static struct sk_buff *cfg_set_own_addr(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 addr;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        addr = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (addr == tipc_own_addr)
+       if (addr == tn->own_addr)
                return tipc_cfg_reply_none();
        if (!tipc_addr_node_valid(addr))
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (node address)");
-       if (tipc_own_addr)
+       if (tn->own_addr)
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (cannot change node address once assigned)");
-       if (!tipc_net_start(addr))
+       if (!tipc_net_start(net, addr))
                return tipc_cfg_reply_none();
 
        return tipc_cfg_reply_error_string("cannot change to network mode");
 }
 
-static struct sk_buff *cfg_set_max_ports(void)
+static struct sk_buff *cfg_set_netid(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
        value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value == tipc_max_ports)
-               return tipc_cfg_reply_none();
-       if (value < 127 || value > 65535)
-               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                                  " (max ports must be 127-65535)");
-       return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-               " (cannot change max ports while TIPC is active)");
-}
-
-static struct sk_buff *cfg_set_netid(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value == tipc_net_id)
+       if (value == tn->net_id)
                return tipc_cfg_reply_none();
        if (value < 1 || value > 9999)
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network id must be 1-9999)");
-       if (tipc_own_addr)
+       if (tn->own_addr)
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                        " (cannot change network id once TIPC has joined a network)");
-       tipc_net_id = value;
+       tn->net_id = value;
        return tipc_cfg_reply_none();
 }
 
-struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
-                               int request_space, int reply_headroom)
+struct sk_buff *tipc_cfg_do_cmd(struct net *net, u32 orig_node, u16 cmd,
+                               const void *request_area, int request_space,
+                               int reply_headroom)
 {
        struct sk_buff *rep_tlv_buf;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        rtnl_lock();
 
@@ -231,7 +219,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        rep_headroom = reply_headroom;
 
        /* Check command authorization */
-       if (likely(in_own_node(orig_node))) {
+       if (likely(in_own_node(net, orig_node))) {
                /* command is permitted */
        } else {
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -245,28 +233,33 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
                rep_tlv_buf = tipc_cfg_reply_none();
                break;
        case TIPC_CMD_GET_NODES:
-               rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_node_get_nodes(net, req_tlv_area,
+                                                 req_tlv_space);
                break;
        case TIPC_CMD_GET_LINKS:
-               rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_node_get_links(net, req_tlv_area,
+                                                 req_tlv_space);
                break;
        case TIPC_CMD_SHOW_LINK_STATS:
-               rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_link_cmd_show_stats(net, req_tlv_area,
+                                                      req_tlv_space);
                break;
        case TIPC_CMD_RESET_LINK_STATS:
-               rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_link_cmd_reset_stats(net, req_tlv_area,
+                                                       req_tlv_space);
                break;
        case TIPC_CMD_SHOW_NAME_TABLE:
-               rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_nametbl_get(net, req_tlv_area,
+                                              req_tlv_space);
                break;
        case TIPC_CMD_GET_BEARER_NAMES:
-               rep_tlv_buf = tipc_bearer_get_names();
+               rep_tlv_buf = tipc_bearer_get_names(net);
                break;
        case TIPC_CMD_GET_MEDIA_NAMES:
                rep_tlv_buf = tipc_media_get_names();
                break;
        case TIPC_CMD_SHOW_PORTS:
-               rep_tlv_buf = tipc_sk_socks_show();
+               rep_tlv_buf = tipc_sk_socks_show(net);
                break;
        case TIPC_CMD_SHOW_STATS:
                rep_tlv_buf = tipc_show_stats();
@@ -274,28 +267,23 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_LINK_TOL:
        case TIPC_CMD_SET_LINK_PRI:
        case TIPC_CMD_SET_LINK_WINDOW:
-               rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd);
+               rep_tlv_buf = tipc_link_cmd_config(net, req_tlv_area,
+                                                  req_tlv_space, cmd);
                break;
        case TIPC_CMD_ENABLE_BEARER:
-               rep_tlv_buf = cfg_enable_bearer();
+               rep_tlv_buf = cfg_enable_bearer(net);
                break;
        case TIPC_CMD_DISABLE_BEARER:
-               rep_tlv_buf = cfg_disable_bearer();
+               rep_tlv_buf = cfg_disable_bearer(net);
                break;
        case TIPC_CMD_SET_NODE_ADDR:
-               rep_tlv_buf = cfg_set_own_addr();
-               break;
-       case TIPC_CMD_SET_MAX_PORTS:
-               rep_tlv_buf = cfg_set_max_ports();
+               rep_tlv_buf = cfg_set_own_addr(net);
                break;
        case TIPC_CMD_SET_NETID:
-               rep_tlv_buf = cfg_set_netid();
-               break;
-       case TIPC_CMD_GET_MAX_PORTS:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
+               rep_tlv_buf = cfg_set_netid(net);
                break;
        case TIPC_CMD_GET_NETID:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tn->net_id);
                break;
        case TIPC_CMD_NOT_NET_ADMIN:
                rep_tlv_buf =
@@ -317,6 +305,8 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_REMOTE_MNG:
        case TIPC_CMD_GET_REMOTE_MNG:
        case TIPC_CMD_DUMP_LOG:
+       case TIPC_CMD_SET_MAX_PORTS:
+       case TIPC_CMD_GET_MAX_PORTS:
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                          " (obsolete command)");
                break;
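
The legacy configuration path gets the same treatment: tipc_cfg_do_cmd() passes the namespace down to each cfg_* helper, and the max-ports commands are retired now that the socket table is a per-namespace rhashtable. A netid query, for example, reduces to a per-namespace field read (demo_get_netid() is a hypothetical condensation of the TIPC_CMD_GET_NETID case above):

    static struct sk_buff *demo_get_netid(struct net *net)
    {
            struct tipc_net *tn = net_generic(net, tipc_net_id);

            return tipc_cfg_reply_unsigned(tn->net_id);
    }
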
index 47b1bf18161215afbbe7020845c9dd92c79381bd..9e9b575fc429b5762e162296936ffe1a298203e9 100644 (file)
 #ifndef _TIPC_CONFIG_H
 #define _TIPC_CONFIG_H
 
-/* ---------------------------------------------------------------------- */
-
 #include "link.h"
 
+#define ULTRA_STRING_MAX_LEN   32768
+
 struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
 int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
                        void *tlv_data, int tlv_data_size);
@@ -61,7 +61,7 @@ static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
        return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
 }
 
-struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
+struct sk_buff *tipc_cfg_do_cmd(struct net *net, u32 orig_node, u16 cmd,
                                const void *req_tlv_area, int req_tlv_space,
                                int headroom);
 #endif
index a5737b8407ddbf63f9ecfb024bde3b079f979098..674bd26985289f5f5cda583a2244456bd520e5c3 100644 (file)
@@ -34,6 +34,8 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "core.h"
 #include "name_table.h"
 #include "subscr.h"
 
 #include <linux/module.h>
 
-/* global variables used by multiple sub-systems within TIPC */
-int tipc_random __read_mostly;
-
 /* configurable TIPC parameters */
-u32 tipc_own_addr __read_mostly;
-int tipc_max_ports __read_mostly;
 int tipc_net_id __read_mostly;
 int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */
 
-/**
- * tipc_buf_acquire - creates a TIPC message buffer
- * @size: message size (including TIPC header)
- *
- * Returns a new buffer with data pointers set to the specified size.
- *
- * NOTE: Headroom is reserved to allow prepending of a data link header.
- *       There may also be unrequested tailroom present at the buffer's end.
- */
-struct sk_buff *tipc_buf_acquire(u32 size)
+static int __net_init tipc_init_net(struct net *net)
 {
-       struct sk_buff *skb;
-       unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
-
-       skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
-       if (skb) {
-               skb_reserve(skb, BUF_HEADROOM);
-               skb_put(skb, size);
-               skb->next = NULL;
-       }
-       return skb;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       int err;
+
+       tn->net_id = 4711;
+       tn->own_addr = 0;
+       get_random_bytes(&tn->random, sizeof(int));
+       INIT_LIST_HEAD(&tn->node_list);
+       spin_lock_init(&tn->node_list_lock);
+
+       err = tipc_sk_rht_init(net);
+       if (err)
+               goto out_sk_rht;
+
+       err = tipc_nametbl_init(net);
+       if (err)
+               goto out_nametbl;
+
+       err = tipc_subscr_start(net);
+       if (err)
+               goto out_subscr;
+       return 0;
+
+out_subscr:
+       tipc_nametbl_stop(net);
+out_nametbl:
+       tipc_sk_rht_destroy(net);
+out_sk_rht:
+       return err;
 }
 
-/**
- * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
- */
-static void tipc_core_stop(void)
+static void __net_exit tipc_exit_net(struct net *net)
 {
-       tipc_net_stop();
-       tipc_bearer_cleanup();
-       tipc_netlink_stop();
-       tipc_subscr_stop();
-       tipc_nametbl_stop();
-       tipc_sk_ref_table_stop();
-       tipc_socket_stop();
-       tipc_unregister_sysctl();
+       tipc_subscr_stop(net);
+       tipc_net_stop(net);
+       tipc_nametbl_stop(net);
+       tipc_sk_rht_destroy(net);
 }
 
-/**
- * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
- */
-static int tipc_core_start(void)
+static struct pernet_operations tipc_net_ops = {
+       .init = tipc_init_net,
+       .exit = tipc_exit_net,
+       .id   = &tipc_net_id,
+       .size = sizeof(struct tipc_net),
+};
+
+static int __init tipc_init(void)
 {
        int err;
 
-       get_random_bytes(&tipc_random, sizeof(tipc_random));
-
-       err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random);
-       if (err)
-               goto out_reftbl;
+       pr_info("Activated (version " TIPC_MOD_VER ")\n");
 
-       err = tipc_nametbl_init();
-       if (err)
-               goto out_nametbl;
+       sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+                             TIPC_LOW_IMPORTANCE;
+       sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+                             TIPC_CRITICAL_IMPORTANCE;
+       sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
 
        err = tipc_netlink_start();
        if (err)
@@ -118,58 +119,37 @@ static int tipc_core_start(void)
        if (err)
                goto out_sysctl;
 
-       err = tipc_subscr_start();
+       err = register_pernet_subsys(&tipc_net_ops);
        if (err)
-               goto out_subscr;
+               goto out_pernet;
 
        err = tipc_bearer_setup();
        if (err)
                goto out_bearer;
 
+       pr_info("Started in single node mode\n");
        return 0;
 out_bearer:
-       tipc_subscr_stop();
-out_subscr:
+       unregister_pernet_subsys(&tipc_net_ops);
+out_pernet:
        tipc_unregister_sysctl();
 out_sysctl:
        tipc_socket_stop();
 out_socket:
        tipc_netlink_stop();
 out_netlink:
-       tipc_nametbl_stop();
-out_nametbl:
-       tipc_sk_ref_table_stop();
-out_reftbl:
+       pr_err("Unable to start in single node mode\n");
        return err;
 }
 
-static int __init tipc_init(void)
-{
-       int res;
-
-       pr_info("Activated (version " TIPC_MOD_VER ")\n");
-
-       tipc_own_addr = 0;
-       tipc_max_ports = CONFIG_TIPC_PORTS;
-       tipc_net_id = 4711;
-
-       sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
-                             TIPC_LOW_IMPORTANCE;
-       sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
-                             TIPC_CRITICAL_IMPORTANCE;
-       sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
-
-       res = tipc_core_start();
-       if (res)
-               pr_err("Unable to start in single node mode\n");
-       else
-               pr_info("Started in single node mode\n");
-       return res;
-}
-
 static void __exit tipc_exit(void)
 {
-       tipc_core_stop();
+       tipc_bearer_cleanup();
+       tipc_netlink_stop();
+       tipc_socket_stop();
+       tipc_unregister_sysctl();
+       unregister_pernet_subsys(&tipc_net_ops);
+
        pr_info("Deactivated\n");
 }
 
index 84602137ce20f1fe051d7c3ebd2a8fafa10c2b43..817b2e9d42279185721300079ecb38228e478cd7 100644 (file)
@@ -37,8 +37,6 @@
 #ifndef _TIPC_CORE_H
 #define _TIPC_CORE_H
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/tipc.h>
 #include <linux/tipc_config.h>
 #include <linux/tipc_netlink.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 #include <linux/etherdevice.h>
+#include <net/netns/generic.h>
+#include <linux/rhashtable.h>
 
-#define TIPC_MOD_VER "2.0.0"
-
-#define ULTRA_STRING_MAX_LEN   32768
-#define TIPC_MAX_SUBSCRIPTIONS 65535
-#define TIPC_MAX_PUBLICATIONS  65535
+#include "node.h"
+#include "bearer.h"
+#include "bcast.h"
+#include "netlink.h"
+#include "link.h"
+#include "node.h"
+#include "msg.h"
 
-struct tipc_msg;       /* msg.h */
+#define TIPC_MOD_VER "2.0.0"
 
 int tipc_snprintf(char *buf, int len, const char *fmt, ...);
 
-/*
- * TIPC-specific error codes
- */
-#define ELINKCONG EAGAIN       /* link congestion <=> resource unavailable */
-
-/*
- * Global configuration variables
- */
-extern u32 tipc_own_addr __read_mostly;
-extern int tipc_max_ports __read_mostly;
 extern int tipc_net_id __read_mostly;
 extern int sysctl_tipc_rmem[3] __read_mostly;
 extern int sysctl_tipc_named_timeout __read_mostly;
 
-/*
- * Other global variables
- */
-extern int tipc_random __read_mostly;
+struct tipc_net {
+       u32 own_addr;
+       int net_id;
+       int random;
 
-/*
- * Routines available to privileged subsystems
- */
-int tipc_netlink_start(void);
-void tipc_netlink_stop(void);
-int tipc_socket_init(void);
-void tipc_socket_stop(void);
-int tipc_sock_create_local(int type, struct socket **res);
-void tipc_sock_release_local(struct socket *sock);
-int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
-                          int flags);
+       /* Node table and node list */
+       spinlock_t node_list_lock;
+       struct hlist_head node_htable[NODE_HTABLE_SIZE];
+       struct list_head node_list;
+       u32 num_nodes;
+       u32 num_links;
+
+       /* Bearer list */
+       struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
+
+       /* Broadcast link */
+       struct tipc_bcbearer *bcbearer;
+       struct tipc_bclink *bclink;
+       struct tipc_link *bcl;
+
+       /* Socket hash table */
+       struct rhashtable sk_rht;
+
+       /* Name table */
+       spinlock_t nametbl_lock;
+       struct name_table *nametbl;
+
+       /* Topology subscription server */
+       struct tipc_server *topsrv;
+       atomic_t subscription_count;
+};
 
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
@@ -109,101 +116,4 @@ void tipc_unregister_sysctl(void);
 #define tipc_unregister_sysctl()
 #endif
 
-/*
- * TIPC timer code
- */
-typedef void (*Handler) (unsigned long);
-
-/**
- * k_init_timer - initialize a timer
- * @timer: pointer to timer structure
- * @routine: pointer to routine to invoke when timer expires
- * @argument: value to pass to routine when timer expires
- *
- * Timer must be initialized before use (and terminated when no longer needed).
- */
-static inline void k_init_timer(struct timer_list *timer, Handler routine,
-                               unsigned long argument)
-{
-       setup_timer(timer, routine, argument);
-}
-
-/**
- * k_start_timer - start a timer
- * @timer: pointer to timer structure
- * @msec: time to delay (in ms)
- *
- * Schedules a previously initialized timer for later execution.
- * If timer is already running, the new timeout overrides the previous request.
- *
- * To ensure the timer doesn't expire before the specified delay elapses,
- * the amount of delay is rounded up when converting to the jiffies
- * then an additional jiffy is added to account for the fact that
- * the starting time may be in the middle of the current jiffy.
- */
-static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
-{
-       mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
-}
-
-/**
- * k_cancel_timer - cancel a timer
- * @timer: pointer to timer structure
- *
- * Cancels a previously initialized timer.
- * Can be called safely even if the timer is already inactive.
- *
- * WARNING: Must not be called when holding locks required by the timer's
- *          timeout routine, otherwise deadlock can occur on SMP systems!
- */
-static inline void k_cancel_timer(struct timer_list *timer)
-{
-       del_timer_sync(timer);
-}
-
-/**
- * k_term_timer - terminate a timer
- * @timer: pointer to timer structure
- *
- * Prevents further use of a previously initialized timer.
- *
- * WARNING: Caller must ensure timer isn't currently running.
- *
- * (Do not "enhance" this routine to automatically cancel an active timer,
- * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
- */
-static inline void k_term_timer(struct timer_list *timer)
-{
-}
-
-/*
- * TIPC message buffer code
- *
- * TIPC message buffer headroom reserves space for the worst-case
- * link-level device header (in case the message is sent off-node).
- *
- * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
- *       are word aligned for quicker access
- */
-#define BUF_HEADROOM LL_MAX_HEADER
-
-struct tipc_skb_cb {
-       void *handle;
-       struct sk_buff *tail;
-       bool deferred;
-       bool wakeup_pending;
-       bool bundling;
-       u16 chain_sz;
-       u16 chain_imp;
-};
-
-#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
-
-static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
-{
-       return (struct tipc_msg *)skb->data;
-}
-
-struct sk_buff *tipc_buf_acquire(u32 size);
-
 #endif
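
The k_*_timer() wrappers deleted above were thin veneers over the standard timer API, which discover.c now calls directly with jiffies-based intervals (see the msecs_to_jiffies() conversions below; the one-jiffy padding k_start_timer() added is dropped in favour of intervals already expressed in jiffies). The mapping, sketched against the real struct tipc_link_req fields:

    setup_timer(&req->timer, disc_timeout, (unsigned long)req);  /* was k_init_timer() */
    mod_timer(&req->timer, jiffies + req->timer_intv);           /* was k_start_timer() */
    del_timer_sync(&req->timer);                                 /* was k_cancel_timer() */
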
index aa722a42ef8b03b4d840e31bbf7582d51948fbae..feef3753615d24f9067f859fcf7b0476b2708775 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/discover.c
  *
- * Copyright (c) 2003-2006, 2014, Ericsson AB
+ * Copyright (c) 2003-2006, 2014-2015, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
 #include "link.h"
 #include "discover.h"
 
-#define TIPC_LINK_REQ_INIT     125     /* min delay during bearer start up */
-#define TIPC_LINK_REQ_FAST     1000    /* max delay if bearer has no links */
-#define TIPC_LINK_REQ_SLOW     60000   /* max delay if bearer has links */
-#define TIPC_LINK_REQ_INACTIVE 0xffffffff /* indicates no timer in use */
-
+/* min delay during bearer start up */
+#define TIPC_LINK_REQ_INIT     msecs_to_jiffies(125)
+/* max delay if bearer has no links */
+#define TIPC_LINK_REQ_FAST     msecs_to_jiffies(1000)
+/* max delay if bearer has links */
+#define TIPC_LINK_REQ_SLOW     msecs_to_jiffies(60000)
+/* indicates no timer in use */
+#define TIPC_LINK_REQ_INACTIVE 0xffffffff
 
 /**
  * struct tipc_link_req - information about an ongoing link setup request
  * @bearer_id: identity of bearer issuing requests
+ * @net: network namespace instance
  * @dest: destination address for request messages
  * @domain: network domain to which links can be established
  * @num_nodes: number of nodes currently discovered (i.e. with an active link)
 struct tipc_link_req {
        u32 bearer_id;
        struct tipc_media_addr dest;
+       struct net *net;
        u32 domain;
        int num_nodes;
        spinlock_t lock;
        struct sk_buff *buf;
        struct timer_list timer;
-       unsigned int timer_intv;
+       unsigned long timer_intv;
 };
 
 /**
  * tipc_disc_init_msg - initialize a link setup message
+ * @net: the applicable net namespace
  * @type: message type (request or response)
  * @b_ptr: ptr to bearer issuing message
  */
-static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
+static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
                               struct tipc_bearer *b_ptr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_msg *msg;
        u32 dest_domain = b_ptr->domain;
 
        msg = buf_msg(buf);
-       tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
+       tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
+                     INT_H_SIZE, dest_domain);
        msg_set_non_seq(msg, 1);
-       msg_set_node_sig(msg, tipc_random);
+       msg_set_node_sig(msg, tn->random);
        msg_set_dest_domain(msg, dest_domain);
-       msg_set_bc_netid(msg, tipc_net_id);
+       msg_set_bc_netid(msg, tn->net_id);
        b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
 }
 
@@ -107,11 +115,14 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
 
 /**
  * tipc_disc_rcv - handle incoming discovery message (request or response)
+ * @net: the applicable net namespace
  * @buf: buffer containing message
  * @bearer: bearer that message arrived on
  */
-void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
+void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+                  struct tipc_bearer *bearer)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node;
        struct tipc_link *link;
        struct tipc_media_addr maddr;
@@ -133,7 +144,7 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
        kfree_skb(buf);
 
        /* Ensure message from node is valid and communication is permitted */
-       if (net_id != tipc_net_id)
+       if (net_id != tn->net_id)
                return;
        if (maddr.broadcast)
                return;
@@ -142,23 +153,19 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
        if (!tipc_addr_node_valid(onode))
                return;
 
-       if (in_own_node(onode)) {
+       if (in_own_node(net, onode)) {
                if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
-                       disc_dupl_alert(bearer, tipc_own_addr, &maddr);
+                       disc_dupl_alert(bearer, tn->own_addr, &maddr);
                return;
        }
-       if (!tipc_in_scope(ddom, tipc_own_addr))
+       if (!tipc_in_scope(ddom, tn->own_addr))
                return;
        if (!tipc_in_scope(bearer->domain, onode))
                return;
 
-       /* Locate, or if necessary, create, node: */
-       node = tipc_node_find(onode);
-       if (!node)
-               node = tipc_node_create(onode);
+       node = tipc_node_create(net, onode);
        if (!node)
                return;
-
        tipc_node_lock(node);
        link = node->links[bearer->identity];
 
@@ -244,8 +251,8 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
        if (respond && (mtyp == DSC_REQ_MSG)) {
                rbuf = tipc_buf_acquire(INT_H_SIZE);
                if (rbuf) {
-                       tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer);
-                       tipc_bearer_send(bearer->identity, rbuf, &maddr);
+                       tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
+                       tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
                        kfree_skb(rbuf);
                }
        }
@@ -265,7 +272,7 @@ static void disc_update(struct tipc_link_req *req)
                if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
                    (req->timer_intv > TIPC_LINK_REQ_FAST)) {
                        req->timer_intv = TIPC_LINK_REQ_INIT;
-                       k_start_timer(&req->timer, req->timer_intv);
+                       mod_timer(&req->timer, jiffies + req->timer_intv);
                }
        }
 }
@@ -295,12 +302,13 @@ void tipc_disc_remove_dest(struct tipc_link_req *req)
 
 /**
  * disc_timeout - send a periodic link setup request
- * @req: ptr to link request structure
+ * @data: ptr to link request structure
  *
  * Called whenever a link setup request timer associated with a bearer expires.
  */
-static void disc_timeout(struct tipc_link_req *req)
+static void disc_timeout(unsigned long data)
 {
+       struct tipc_link_req *req = (struct tipc_link_req *)data;
        int max_delay;
 
        spin_lock_bh(&req->lock);
@@ -318,7 +326,7 @@ static void disc_timeout(struct tipc_link_req *req)
         * hold at fast polling rate if don't have any associated nodes,
         * otherwise hold at slow polling rate
         */
-       tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+       tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest);
 
 
        req->timer_intv *= 2;
@@ -329,20 +337,22 @@ static void disc_timeout(struct tipc_link_req *req)
        if (req->timer_intv > max_delay)
                req->timer_intv = max_delay;
 
-       k_start_timer(&req->timer, req->timer_intv);
+       mod_timer(&req->timer, jiffies + req->timer_intv);
 exit:
        spin_unlock_bh(&req->lock);
 }
 
 /**
  * tipc_disc_create - create object to send periodic link setup requests
+ * @net: the applicable net namespace
  * @b_ptr: ptr to bearer issuing requests
  * @dest: destination address for request messages
  * @dest_domain: network domain to which links can be established
  *
  * Returns 0 if successful, otherwise -errno.
  */
-int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
+int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
+                    struct tipc_media_addr *dest)
 {
        struct tipc_link_req *req;
 
@@ -356,17 +366,18 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
                return -ENOMEM;
        }
 
-       tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
+       tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
        memcpy(&req->dest, dest, sizeof(*dest));
+       req->net = net;
        req->bearer_id = b_ptr->identity;
        req->domain = b_ptr->domain;
        req->num_nodes = 0;
        req->timer_intv = TIPC_LINK_REQ_INIT;
        spin_lock_init(&req->lock);
-       k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
-       k_start_timer(&req->timer, req->timer_intv);
+       setup_timer(&req->timer, disc_timeout, (unsigned long)req);
+       mod_timer(&req->timer, jiffies + req->timer_intv);
        b_ptr->link_req = req;
-       tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+       tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
        return 0;
 }
 
@@ -376,28 +387,29 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
  */
 void tipc_disc_delete(struct tipc_link_req *req)
 {
-       k_cancel_timer(&req->timer);
-       k_term_timer(&req->timer);
+       del_timer_sync(&req->timer);
        kfree_skb(req->buf);
        kfree(req);
 }
 
 /**
  * tipc_disc_reset - reset object to send periodic link setup requests
+ * @net: the applicable net namespace
  * @b_ptr: ptr to bearer issuing requests
  * @dest_domain: network domain to which links can be established
  */
-void tipc_disc_reset(struct tipc_bearer *b_ptr)
+void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr)
 {
        struct tipc_link_req *req = b_ptr->link_req;
 
        spin_lock_bh(&req->lock);
-       tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
+       tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
+       req->net = net;
        req->bearer_id = b_ptr->identity;
        req->domain = b_ptr->domain;
        req->num_nodes = 0;
        req->timer_intv = TIPC_LINK_REQ_INIT;
-       k_start_timer(&req->timer, req->timer_intv);
-       tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+       mod_timer(&req->timer, jiffies + req->timer_intv);
+       tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
        spin_unlock_bh(&req->lock);
 }
index 515b57392f4d881b567d6d29cc7677bfece5e4c7..c9b12770c5ed9a4d4e38f17e5434c0bb86eecac5 100644 (file)
 
 struct tipc_link_req;
 
-int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
+int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
+                    struct tipc_media_addr *dest);
 void tipc_disc_delete(struct tipc_link_req *req);
-void tipc_disc_reset(struct tipc_bearer *b_ptr);
+void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr);
 void tipc_disc_add_dest(struct tipc_link_req *req);
 void tipc_disc_remove_dest(struct tipc_link_req *req);
-void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
+void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+                  struct tipc_bearer *b_ptr);
 
 #endif
index 082c3b5b32a12094806db13c46a93994781379d0..942491234099ae6947916f1c6c24a2cbfc3360dc 100644 (file)
@@ -101,19 +101,20 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
  */
 #define START_CHANGEOVER 100000u
 
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
-                                      struct sk_buff *buf);
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
-static int  tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
-                                struct sk_buff **buf);
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
+static void link_handle_out_of_seq_msg(struct tipc_link *link,
+                                      struct sk_buff *skb);
+static void tipc_link_proto_rcv(struct tipc_link *link,
+                               struct sk_buff *skb);
+static int  tipc_link_tunnel_rcv(struct tipc_node *node,
+                                struct sk_buff **skb);
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
 static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf);
-static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf);
+static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
+static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
 
 /*
  *  Simple link routines
@@ -123,13 +124,30 @@ static unsigned int align(unsigned int i)
        return (i + 3) & ~3u;
 }
 
+static void tipc_link_release(struct kref *kref)
+{
+       kfree(container_of(kref, struct tipc_link, ref));
+}
+
+static void tipc_link_get(struct tipc_link *l_ptr)
+{
+       kref_get(&l_ptr->ref);
+}
+
+static void tipc_link_put(struct tipc_link *l_ptr)
+{
+       kref_put(&l_ptr->ref, tipc_link_release);
+}
+
 static void link_init_max_pkt(struct tipc_link *l_ptr)
 {
+       struct tipc_node *node = l_ptr->owner;
+       struct tipc_net *tn = net_generic(node->net, tipc_net_id);
        struct tipc_bearer *b_ptr;
        u32 max_pkt;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
        if (!b_ptr) {
                rcu_read_unlock();
                return;
@@ -169,8 +187,9 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
  * link_timeout - handle expiration of link timer
  * @l_ptr: pointer to link
  */
-static void link_timeout(struct tipc_link *l_ptr)
+static void link_timeout(unsigned long data)
 {
+       struct tipc_link *l_ptr = (struct tipc_link *)data;
        struct sk_buff *skb;
 
        tipc_node_lock(l_ptr->owner);
@@ -215,11 +234,13 @@ static void link_timeout(struct tipc_link *l_ptr)
                tipc_link_push_packets(l_ptr);
 
        tipc_node_unlock(l_ptr->owner);
+       tipc_link_put(l_ptr);
 }
 
-static void link_set_timer(struct tipc_link *l_ptr, u32 time)
+static void link_set_timer(struct tipc_link *link, unsigned long time)
 {
-       k_start_timer(&l_ptr->timer, time);
+       if (!mod_timer(&link->timer, jiffies + time))
+               tipc_link_get(link);
 }
 
 /**
@@ -234,6 +255,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                                   struct tipc_bearer *b_ptr,
                                   const struct tipc_media_addr *media_addr)
 {
+       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
        struct tipc_link *l_ptr;
        struct tipc_msg *msg;
        char *if_name;
@@ -259,12 +281,12 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                pr_warn("Link creation failed, no memory\n");
                return NULL;
        }
-
+       kref_init(&l_ptr->ref);
        l_ptr->addr = peer;
        if_name = strchr(b_ptr->name, ':') + 1;
        sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
-               tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
-               tipc_node(tipc_own_addr),
+               tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
+               tipc_node(tn->own_addr),
                if_name,
                tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
                /* note: peer i/f name is updated by reset/activate message */
@@ -278,9 +300,10 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 
        l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
        msg = l_ptr->pmsg;
-       tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
+       tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
+                     l_ptr->addr);
        msg_set_size(msg, sizeof(l_ptr->proto_msg));
-       msg_set_session(msg, (tipc_random & 0xffff));
+       msg_set_session(msg, (tn->random & 0xffff));
        msg_set_bearer_id(msg, b_ptr->identity);
        strcpy((char *)msg_data(msg), if_name);
 
@@ -293,48 +316,52 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
        l_ptr->next_out_no = 1;
        __skb_queue_head_init(&l_ptr->outqueue);
        __skb_queue_head_init(&l_ptr->deferred_queue);
-       skb_queue_head_init(&l_ptr->waiting_sks);
-
+       skb_queue_head_init(&l_ptr->wakeupq);
+       skb_queue_head_init(&l_ptr->inputq);
+       skb_queue_head_init(&l_ptr->namedq);
        link_reset_statistics(l_ptr);
-
        tipc_node_attach_link(n_ptr, l_ptr);
-
-       k_init_timer(&l_ptr->timer, (Handler)link_timeout,
-                    (unsigned long)l_ptr);
-
+       setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
        link_state_event(l_ptr, STARTING_EVT);
 
        return l_ptr;
 }
 
-void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
+/**
+ * link_delete - Conditional deletion of link.
+ *               If timer still running, real delete is done when it expires
+ * @link: link to be deleted
+ */
+void tipc_link_delete(struct tipc_link *link)
 {
-       struct tipc_link *l_ptr;
-       struct tipc_node *n_ptr;
+       tipc_link_reset_fragments(link);
+       tipc_node_detach_link(link->owner, link);
+       tipc_link_put(link);
+}
+
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
+                          bool shutting_down)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *link;
+       struct tipc_node *node;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->links[bearer_id];
-               if (l_ptr) {
-                       tipc_link_reset(l_ptr);
-                       if (shutting_down || !tipc_node_is_up(n_ptr)) {
-                               tipc_node_detach_link(l_ptr->owner, l_ptr);
-                               tipc_link_reset_fragments(l_ptr);
-                               tipc_node_unlock(n_ptr);
-
-                               /* Nobody else can access this link now: */
-                               del_timer_sync(&l_ptr->timer);
-                               kfree(l_ptr);
-                       } else {
-                               /* Detach/delete when failover is finished: */
-                               l_ptr->flags |= LINK_STOPPED;
-                               tipc_node_unlock(n_ptr);
-                               del_timer_sync(&l_ptr->timer);
-                       }
+       list_for_each_entry_rcu(node, &tn->node_list, list) {
+               tipc_node_lock(node);
+               link = node->links[bearer_id];
+               if (!link) {
+                       tipc_node_unlock(node);
                        continue;
                }
-               tipc_node_unlock(n_ptr);
+               tipc_link_reset(link);
+               if (del_timer(&link->timer))
+                       tipc_link_put(link);
+               link->flags |= LINK_STOPPED;
+               /* Delete link now, or when failover is finished: */
+               if (shutting_down || !tipc_node_is_up(node))
+                       tipc_link_delete(link);
+               tipc_node_unlock(node);
        }
        rcu_read_unlock();
 }
@@ -352,13 +379,14 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
 {
        struct sk_buff *buf;
 
-       buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr,
-                             tipc_own_addr, oport, 0, 0);
+       buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
+                             link_own_addr(link), link_own_addr(link),
+                             oport, 0, 0);
        if (!buf)
                return false;
        TIPC_SKB_CB(buf)->chain_sz = chain_sz;
        TIPC_SKB_CB(buf)->chain_imp = imp;
-       skb_queue_tail(&link->waiting_sks, buf);
+       skb_queue_tail(&link->wakeupq, buf);
        link->stats.link_congs++;
        return true;
 }
@@ -369,17 +397,19 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
  * Move a number of waiting users, as permitted by available space in
  * the send queue, from link wait queue to node wait queue for wakeup
  */
-static void link_prepare_wakeup(struct tipc_link *link)
+void link_prepare_wakeup(struct tipc_link *link)
 {
        uint pend_qsz = skb_queue_len(&link->outqueue);
        struct sk_buff *skb, *tmp;
 
-       skb_queue_walk_safe(&link->waiting_sks, skb, tmp) {
+       skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
                if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
                        break;
                pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
-               skb_unlink(skb, &link->waiting_sks);
-               skb_queue_tail(&link->owner->waiting_sks, skb);
+               skb_unlink(skb, &link->wakeupq);
+               skb_queue_tail(&link->inputq, skb);
+               link->owner->inputq = &link->inputq;
+               link->owner->action_flags |= TIPC_MSG_EVT;
        }
 }
 
@@ -425,20 +455,20 @@ void tipc_link_reset(struct tipc_link *l_ptr)
                return;
 
        tipc_node_link_down(l_ptr->owner, l_ptr);
-       tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
+       tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
 
        if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
                l_ptr->reset_checkpoint = checkpoint;
                l_ptr->exp_msg_count = START_CHANGEOVER;
        }
 
-       /* Clean up all queues: */
+       /* Clean up all queues, except inputq: */
        __skb_queue_purge(&l_ptr->outqueue);
        __skb_queue_purge(&l_ptr->deferred_queue);
-       if (!skb_queue_empty(&l_ptr->waiting_sks)) {
-               skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
-               owner->action_flags |= TIPC_WAKEUP_USERS;
-       }
+       skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq);
+       if (!skb_queue_empty(&l_ptr->inputq))
+               owner->action_flags |= TIPC_MSG_EVT;
+       owner->inputq = &l_ptr->inputq;
        l_ptr->next_out = NULL;
        l_ptr->unacked_window = 0;
        l_ptr->checkpoint = 1;
@@ -448,13 +478,14 @@ void tipc_link_reset(struct tipc_link *l_ptr)
        link_reset_statistics(l_ptr);
 }
 
-void tipc_link_reset_list(unsigned int bearer_id)
+void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *l_ptr;
        struct tipc_node *n_ptr;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[bearer_id];
                if (l_ptr)
@@ -464,11 +495,14 @@ void tipc_link_reset_list(unsigned int bearer_id)
        rcu_read_unlock();
 }
 
-static void link_activate(struct tipc_link *l_ptr)
+static void link_activate(struct tipc_link *link)
 {
-       l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
-       tipc_node_link_up(l_ptr->owner, l_ptr);
-       tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
+       struct tipc_node *node = link->owner;
+
+       link->next_in_no = 1;
+       link->stats.recv_info = 1;
+       tipc_node_link_up(node, link);
+       tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
 }
 
 /**
@@ -479,7 +513,7 @@ static void link_activate(struct tipc_link *l_ptr)
 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 {
        struct tipc_link *other;
-       u32 cont_intv = l_ptr->continuity_interval;
+       unsigned long cont_intv = l_ptr->cont_intv;
 
        if (l_ptr->flags & LINK_STOPPED)
                return;
@@ -522,8 +556,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        link_set_timer(l_ptr, cont_intv / 4);
                        break;
                case RESET_MSG:
-                       pr_info("%s<%s>, requested by peer\n", link_rst_msg,
-                               l_ptr->name);
+                       pr_debug("%s<%s>, requested by peer\n",
+                                link_rst_msg, l_ptr->name);
                        tipc_link_reset(l_ptr);
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
@@ -533,7 +567,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        link_set_timer(l_ptr, cont_intv);
                        break;
                default:
-                       pr_err("%s%u in WW state\n", link_unk_evt, event);
+                       pr_debug("%s%u in WW state\n", link_unk_evt, event);
                }
                break;
        case WORKING_UNKNOWN:
@@ -545,8 +579,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        link_set_timer(l_ptr, cont_intv);
                        break;
                case RESET_MSG:
-                       pr_info("%s<%s>, requested by peer while probing\n",
-                               link_rst_msg, l_ptr->name);
+                       pr_debug("%s<%s>, requested by peer while probing\n",
+                                link_rst_msg, l_ptr->name);
                        tipc_link_reset(l_ptr);
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
@@ -572,8 +606,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                                l_ptr->fsm_msg_cnt++;
                                link_set_timer(l_ptr, cont_intv / 4);
                        } else {        /* Link has failed */
-                               pr_warn("%s<%s>, peer not responding\n",
-                                       link_rst_msg, l_ptr->name);
+                               pr_debug("%s<%s>, peer not responding\n",
+                                        link_rst_msg, l_ptr->name);
                                tipc_link_reset(l_ptr);
                                l_ptr->state = RESET_UNKNOWN;
                                l_ptr->fsm_msg_cnt = 0;
@@ -614,7 +648,9 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        break;
                case STARTING_EVT:
                        l_ptr->flags |= LINK_STARTED;
-                       /* fall through */
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
                case TIMEOUT_EVT:
                        tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
@@ -700,7 +736,8 @@ drop:
  * Only the socket functions tipc_send_stream() and tipc_send_packet() need
  * to act on the return value, since they may need to do more send attempts.
  */
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
+int __tipc_link_xmit(struct net *net, struct tipc_link *link,
+                    struct sk_buff_head *list)
 {
        struct tipc_msg *msg = buf_msg(skb_peek(list));
        uint psz = msg_size(msg);
@@ -733,7 +770,8 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
 
                if (skb_queue_len(outqueue) < sndlim) {
                        __skb_queue_tail(outqueue, skb);
-                       tipc_bearer_send(link->bearer_id, skb, addr);
+                       tipc_bearer_send(net, link->bearer_id,
+                                        skb, addr);
                        link->next_out = NULL;
                        link->unacked_window = 0;
                } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
@@ -758,7 +796,7 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
 
 static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
 {
-       __skb_queue_head_init(list);
+       skb_queue_head_init(list);
        __skb_queue_tail(list, skb);
 }
 
@@ -767,19 +805,21 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
        struct sk_buff_head head;
 
        skb2list(skb, &head);
-       return __tipc_link_xmit(link, &head);
+       return __tipc_link_xmit(link->owner->net, link, &head);
 }
 
-int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
+int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+                      u32 selector)
 {
        struct sk_buff_head head;
 
        skb2list(skb, &head);
-       return tipc_link_xmit(&head, dnode, selector);
+       return tipc_link_xmit(net, &head, dnode, selector);
 }
 
 /**
  * tipc_link_xmit() is the general link level function for message sending
+ * @net: the applicable net namespace
  * @list: chain of buffers containing message
  * @dsz: amount of user data to be sent
  * @dnode: address of destination node
@@ -787,33 +827,28 @@ int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
+int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+                  u32 selector)
 {
        struct tipc_link *link = NULL;
        struct tipc_node *node;
        int rc = -EHOSTUNREACH;
 
-       node = tipc_node_find(dnode);
+       node = tipc_node_find(net, dnode);
        if (node) {
                tipc_node_lock(node);
                link = node->active_links[selector & 1];
                if (link)
-                       rc = __tipc_link_xmit(link, list);
+                       rc = __tipc_link_xmit(net, link, list);
                tipc_node_unlock(node);
        }
-
        if (link)
                return rc;
 
-       if (likely(in_own_node(dnode))) {
-               /* As a node local message chain never contains more than one
-                * buffer, we just need to dequeue one SKB buffer from the
-                * head list.
-                */
-               return tipc_sk_rcv(__skb_dequeue(list));
-       }
-       __skb_queue_purge(list);
+       if (likely(in_own_node(net, dnode)))
+               return tipc_sk_rcv(net, list);
 
+       __skb_queue_purge(list);
        return rc;
 }
 
@@ -835,7 +870,8 @@ static void tipc_link_sync_xmit(struct tipc_link *link)
                return;
 
        msg = buf_msg(skb);
-       tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
+       tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
+                     INT_H_SIZE, link->addr);
        msg_set_last_bcast(msg, link->owner->bclink.acked);
        __tipc_link_xmit_skb(link, skb);
 }
@@ -890,7 +926,8 @@ void tipc_link_push_packets(struct tipc_link *l_ptr)
                        msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
                        if (msg_user(msg) == MSG_BUNDLER)
                                TIPC_SKB_CB(skb)->bundling = false;
-                       tipc_bearer_send(l_ptr->bearer_id, skb,
+                       tipc_bearer_send(l_ptr->owner->net,
+                                        l_ptr->bearer_id, skb,
                                         &l_ptr->media_addr);
                        l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
                } else {
@@ -923,6 +960,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
                                    struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
+       struct net *net = l_ptr->owner->net;
 
        pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
 
@@ -940,7 +978,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
                pr_cont("Outstanding acks: %lu\n",
                        (unsigned long) TIPC_SKB_CB(buf)->handle);
 
-               n_ptr = tipc_bclink_retransmit_to();
+               n_ptr = tipc_bclink_retransmit_to(net);
                tipc_node_lock(n_ptr);
 
                tipc_addr_string_fill(addr_string, n_ptr->addr);
@@ -955,7 +993,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
 
                tipc_node_unlock(n_ptr);
 
-               tipc_bclink_set_flags(TIPC_BCLINK_RESET);
+               tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
                l_ptr->stale_count = 0;
        }
 }
@@ -987,7 +1025,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
                msg = buf_msg(skb);
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-               tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr);
+               tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
+                                &l_ptr->media_addr);
                retransmits--;
                l_ptr->stats.retransmitted++;
        }
@@ -1063,14 +1102,16 @@ static int link_recv_buf_validate(struct sk_buff *buf)
 
 /**
  * tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
  * @skb: TIPC packet
  * @b_ptr: pointer to bearer message arrived on
  *
  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
  * structure (i.e. cannot be NULL), but bearer can be inactive.
  */
-void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff_head head;
        struct tipc_node *n_ptr;
        struct tipc_link *l_ptr;
@@ -1096,19 +1137,19 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
 
                if (unlikely(msg_non_seq(msg))) {
                        if (msg_user(msg) ==  LINK_CONFIG)
-                               tipc_disc_rcv(skb, b_ptr);
+                               tipc_disc_rcv(net, skb, b_ptr);
                        else
-                               tipc_bclink_rcv(skb);
+                               tipc_bclink_rcv(net, skb);
                        continue;
                }
 
                /* Discard unicast link messages destined for another node */
                if (unlikely(!msg_short(msg) &&
-                            (msg_destnode(msg) != tipc_own_addr)))
+                            (msg_destnode(msg) != tn->own_addr)))
                        goto discard;
 
                /* Locate neighboring node that sent message */
-               n_ptr = tipc_node_find(msg_prevnode(msg));
+               n_ptr = tipc_node_find(net, msg_prevnode(msg));
                if (unlikely(!n_ptr))
                        goto discard;
                tipc_node_lock(n_ptr);
@@ -1116,7 +1157,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
                /* Locate unicast link endpoint that should handle message */
                l_ptr = n_ptr->links[b_ptr->identity];
                if (unlikely(!l_ptr))
-                       goto unlock_discard;
+                       goto unlock;
 
                /* Verify that communication with node is currently allowed */
                if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
@@ -1127,7 +1168,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
                        n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
 
                if (tipc_node_blocked(n_ptr))
-                       goto unlock_discard;
+                       goto unlock;
 
                /* Validate message sequence number info */
                seq_no = msg_seqno(msg);
@@ -1151,18 +1192,16 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
                if (unlikely(l_ptr->next_out))
                        tipc_link_push_packets(l_ptr);
 
-               if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
+               if (released && !skb_queue_empty(&l_ptr->wakeupq))
                        link_prepare_wakeup(l_ptr);
-                       l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS;
-               }
 
                /* Process the incoming packet */
                if (unlikely(!link_working_working(l_ptr))) {
                        if (msg_user(msg) == LINK_PROTOCOL) {
                                tipc_link_proto_rcv(l_ptr, skb);
                                link_retrieve_defq(l_ptr, &head);
-                               tipc_node_unlock(n_ptr);
-                               continue;
+                               skb = NULL;
+                               goto unlock;
                        }
 
                        /* Traffic message. Conditionally activate link */
@@ -1171,18 +1210,18 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
                        if (link_working_working(l_ptr)) {
                                /* Re-insert buffer in front of queue */
                                __skb_queue_head(&head, skb);
-                               tipc_node_unlock(n_ptr);
-                               continue;
+                               skb = NULL;
+                               goto unlock;
                        }
-                       goto unlock_discard;
+                       goto unlock;
                }
 
                /* Link is now in state WORKING_WORKING */
                if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
                        link_handle_out_of_seq_msg(l_ptr, skb);
                        link_retrieve_defq(l_ptr, &head);
-                       tipc_node_unlock(n_ptr);
-                       continue;
+                       skb = NULL;
+                       goto unlock;
                }
                l_ptr->next_in_no++;
                if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
@@ -1192,95 +1231,102 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
                        l_ptr->stats.sent_acks++;
                        tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
                }
-
-               if (tipc_link_prepare_input(l_ptr, &skb)) {
-                       tipc_node_unlock(n_ptr);
-                       continue;
-               }
-               tipc_node_unlock(n_ptr);
-
-               if (tipc_link_input(l_ptr, skb) != 0)
-                       goto discard;
-               continue;
-unlock_discard:
+               tipc_link_input(l_ptr, skb);
+               skb = NULL;
+unlock:
                tipc_node_unlock(n_ptr);
 discard:
-               kfree_skb(skb);
+               if (unlikely(skb))
+                       kfree_skb(skb);
        }
 }
 
-/**
- * tipc_link_prepare_input - process TIPC link messages
- *
- * returns nonzero if the message was consumed
+/* tipc_data_input - deliver data and name distr msgs to upper layer
  *
+ * Consumes buffer if message is of right type
  * Node lock must be held
  */
-static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
 {
-       struct tipc_node *n;
-       struct tipc_msg *msg;
-       int res = -EINVAL;
+       struct tipc_node *node = link->owner;
+       struct tipc_msg *msg = buf_msg(skb);
+       u32 dport = msg_destport(msg);
 
-       n = l->owner;
-       msg = buf_msg(*buf);
        switch (msg_user(msg)) {
-       case CHANGEOVER_PROTOCOL:
-               if (tipc_link_tunnel_rcv(n, buf))
-                       res = 0;
-               break;
-       case MSG_FRAGMENTER:
-               l->stats.recv_fragments++;
-               if (tipc_buf_append(&l->reasm_buf, buf)) {
-                       l->stats.recv_fragmented++;
-                       res = 0;
-               } else if (!l->reasm_buf) {
-                       tipc_link_reset(l);
+       case TIPC_LOW_IMPORTANCE:
+       case TIPC_MEDIUM_IMPORTANCE:
+       case TIPC_HIGH_IMPORTANCE:
+       case TIPC_CRITICAL_IMPORTANCE:
+       case CONN_MANAGER:
+               if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
+                       node->inputq = &link->inputq;
+                       node->action_flags |= TIPC_MSG_EVT;
                }
-               break;
-       case MSG_BUNDLER:
-               l->stats.recv_bundles++;
-               l->stats.recv_bundled += msg_msgcnt(msg);
-               res = 0;
-               break;
+               return true;
        case NAME_DISTRIBUTOR:
-               n->bclink.recv_permitted = true;
-               res = 0;
-               break;
+               node->bclink.recv_permitted = true;
+               node->namedq = &link->namedq;
+               skb_queue_tail(&link->namedq, skb);
+               if (skb_queue_len(&link->namedq) == 1)
+                       node->action_flags |= TIPC_NAMED_MSG_EVT;
+               return true;
+       case MSG_BUNDLER:
+       case CHANGEOVER_PROTOCOL:
+       case MSG_FRAGMENTER:
        case BCAST_PROTOCOL:
-               tipc_link_sync_rcv(n, *buf);
-               break;
+               return false;
        default:
-               res = 0;
-       }
-       return res;
+               pr_warn("Dropping received illegal msg type\n");
+               kfree_skb(skb);
+               return false;
+       };
 }
-/**
- * tipc_link_input - Deliver message too higher layers
+
+/* tipc_link_input - process packet that has passed link protocol check
+ *
+ * Consumes buffer
+ * Node lock must be held
  */
-static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
+static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
 {
-       struct tipc_msg *msg = buf_msg(buf);
-       int res = 0;
+       struct tipc_node *node = link->owner;
+       struct tipc_msg *msg = buf_msg(skb);
+       struct sk_buff *iskb;
+       int pos = 0;
+
+       if (likely(tipc_data_input(link, skb)))
+               return;
 
        switch (msg_user(msg)) {
-       case TIPC_LOW_IMPORTANCE:
-       case TIPC_MEDIUM_IMPORTANCE:
-       case TIPC_HIGH_IMPORTANCE:
-       case TIPC_CRITICAL_IMPORTANCE:
-       case CONN_MANAGER:
-               tipc_sk_rcv(buf);
+       case CHANGEOVER_PROTOCOL:
+               if (!tipc_link_tunnel_rcv(node, &skb))
+                       break;
+               if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
+                       tipc_data_input(link, skb);
+                       break;
+               }
+       case MSG_BUNDLER:
+               link->stats.recv_bundles++;
+               link->stats.recv_bundled += msg_msgcnt(msg);
+
+               while (tipc_msg_extract(skb, &iskb, &pos))
+                       tipc_data_input(link, iskb);
                break;
-       case NAME_DISTRIBUTOR:
-               tipc_named_rcv(buf);
+       case MSG_FRAGMENTER:
+               link->stats.recv_fragments++;
+               if (tipc_buf_append(&link->reasm_buf, &skb)) {
+                       link->stats.recv_fragmented++;
+                       tipc_data_input(link, skb);
+               } else if (!link->reasm_buf) {
+                       tipc_link_reset(link);
+               }
                break;
-       case MSG_BUNDLER:
-               tipc_link_bundle_rcv(buf);
+       case BCAST_PROTOCOL:
+               tipc_link_sync_rcv(node, skb);
                break;
        default:
-               res = -EINVAL;
-       }
-       return res;
+               break;
+       };
 }
 
 /**
@@ -1381,7 +1427,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
        msg_set_type(msg, msg_typ);
        msg_set_net_plane(msg, l_ptr->net_plane);
        msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-       msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
+       msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
 
        if (msg_typ == STATE_MSG) {
                u32 next_sent = mod(l_ptr->next_out_no);
@@ -1445,7 +1491,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
        skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
        buf->priority = TC_PRIO_CONTROL;
 
-       tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
+       tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
+                        &l_ptr->media_addr);
        l_ptr->unacked_window = 0;
        kfree_skb(buf);
 }
@@ -1455,7 +1502,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
  * Note that network plane id propagates through the network, and may
  * change at any time. The node with lowest address rules
  */
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
+                               struct sk_buff *buf)
 {
        u32 rec_gap = 0;
        u32 max_pkt_info;
@@ -1468,7 +1516,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
                goto exit;
 
        if (l_ptr->net_plane != msg_net_plane(msg))
-               if (tipc_own_addr > msg_prevnode(msg))
+               if (link_own_addr(l_ptr) > msg_prevnode(msg))
                        l_ptr->net_plane = msg_net_plane(msg);
 
        switch (msg_type(msg)) {
@@ -1535,9 +1583,9 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
 
                if (msg_linkprio(msg) &&
                    (msg_linkprio(msg) != l_ptr->priority)) {
-                       pr_warn("%s<%s>, priority change %u->%u\n",
-                               link_rst_msg, l_ptr->name, l_ptr->priority,
-                               msg_linkprio(msg));
+                       pr_debug("%s<%s>, priority change %u->%u\n",
+                                link_rst_msg, l_ptr->name,
+                                l_ptr->priority, msg_linkprio(msg));
                        l_ptr->priority = msg_linkprio(msg);
                        tipc_link_reset(l_ptr); /* Enforce change to take effect */
                        break;
@@ -1636,8 +1684,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
        if (!tunnel)
                return;
 
-       tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
-                ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
+       tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
+                     ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
        msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
        msg_set_msgcnt(&tunnel_hdr, msgcount);
 
@@ -1694,8 +1742,8 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
        struct sk_buff *skb;
        struct tipc_msg tunnel_hdr;
 
-       tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
-                DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
+       tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
+                     DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
        msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
        msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
        skb_queue_walk(&l_ptr->outqueue, skb) {
@@ -1729,7 +1777,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
  * @from_pos: offset to extract from
  *
  * Returns a new message buffer containing an embedded message.  The
- * encapsulating message itself is left unchanged.
+ * encapsulating buffer is left unchanged.
  */
 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
 {
@@ -1743,8 +1791,6 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
        return eb;
 }
 
-
-
 /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
  * Owner node is locked.
  */
@@ -1804,10 +1850,8 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
                }
        }
 exit:
-       if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
-               tipc_node_detach_link(l_ptr->owner, l_ptr);
-               kfree(l_ptr);
-       }
+       if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
+               tipc_link_delete(l_ptr);
        return buf;
 }
 
@@ -1845,50 +1889,16 @@ exit:
        return *buf != NULL;
 }
 
-/*
- *  Bundler functionality:
- */
-void tipc_link_bundle_rcv(struct sk_buff *buf)
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
 {
-       u32 msgcount = msg_msgcnt(buf_msg(buf));
-       u32 pos = INT_H_SIZE;
-       struct sk_buff *obuf;
-       struct tipc_msg *omsg;
-
-       while (msgcount--) {
-               obuf = buf_extract(buf, pos);
-               if (obuf == NULL) {
-                       pr_warn("Link unable to unbundle message(s)\n");
-                       break;
-               }
-               omsg = buf_msg(obuf);
-               pos += align(msg_size(omsg));
-               if (msg_isdata(omsg)) {
-                       if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
-                               tipc_sk_mcast_rcv(obuf);
-                       else
-                               tipc_sk_rcv(obuf);
-               } else if (msg_user(omsg) == CONN_MANAGER) {
-                       tipc_sk_rcv(obuf);
-               } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
-                       tipc_named_rcv(obuf);
-               } else {
-                       pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
-                       kfree_skb(obuf);
-               }
-       }
-       kfree_skb(buf);
-}
+       unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
 
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
-{
-       if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
+       if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
                return;
 
-       l_ptr->tolerance = tolerance;
-       l_ptr->continuity_interval =
-               ((tolerance / 4) > 500) ? 500 : tolerance / 4;
-       l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
+       l_ptr->tolerance = tol;
+       l_ptr->cont_intv = msecs_to_jiffies(intv);
+       l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
 }
 
 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
@@ -1911,14 +1921,17 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
 }
 
 /* tipc_link_find_owner - locate owner node of link by link's name
+ * @net: the applicable net namespace
  * @name: pointer to link name string
  * @bearer_id: pointer to index in 'node->links' array where the link was found.
  *
  * Returns pointer to node owning the link, or 0 if no matching link is found.
  */
-static struct tipc_node *tipc_link_find_owner(const char *link_name,
+static struct tipc_node *tipc_link_find_owner(struct net *net,
+                                             const char *link_name,
                                              unsigned int *bearer_id)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *l_ptr;
        struct tipc_node *n_ptr;
        struct tipc_node *found_node = NULL;
@@ -1926,7 +1939,7 @@ static struct tipc_node *tipc_link_find_owner(const char *link_name,
 
        *bearer_id = 0;
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                tipc_node_lock(n_ptr);
                for (i = 0; i < MAX_BEARERS; i++) {
                        l_ptr = n_ptr->links[i];
@@ -1970,6 +1983,7 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
 
 /**
  * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
+ * @net: the applicable net namespace
  * @name: ptr to link, bearer, or media name
  * @new_value: new value of link, bearer, or media setting
  * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
@@ -1978,7 +1992,8 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
  *
  * Returns 0 if value updated and negative value on error.
  */
-static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
+static int link_cmd_set_value(struct net *net, const char *name, u32 new_value,
+                             u16 cmd)
 {
        struct tipc_node *node;
        struct tipc_link *l_ptr;
@@ -1987,7 +2002,7 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
        int bearer_id;
        int res = 0;
 
-       node = tipc_link_find_owner(name, &bearer_id);
+       node = tipc_link_find_owner(net, name, &bearer_id);
        if (node) {
                tipc_node_lock(node);
                l_ptr = node->links[bearer_id];
@@ -2016,7 +2031,7 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
                return res;
        }
 
-       b_ptr = tipc_bearer_find(name);
+       b_ptr = tipc_bearer_find(net, name);
        if (b_ptr) {
                switch (cmd) {
                case TIPC_CMD_SET_LINK_TOL:
@@ -2055,8 +2070,8 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
        return res;
 }
 
-struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
-                                    u16 cmd)
+struct sk_buff *tipc_link_cmd_config(struct net *net, const void *req_tlv_area,
+                                    int req_tlv_space, u16 cmd)
 {
        struct tipc_link_config *args;
        u32 new_value;
@@ -2074,13 +2089,13 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
 
        if (!strcmp(args->name, tipc_bclink_name)) {
                if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
-                   (tipc_bclink_set_queue_limits(new_value) == 0))
+                   (tipc_bclink_set_queue_limits(net, new_value) == 0))
                        return tipc_cfg_reply_none();
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (cannot change setting on broadcast link)");
        }
 
-       res = link_cmd_set_value(args->name, new_value, cmd);
+       res = link_cmd_set_value(net, args->name, new_value, cmd);
        if (res)
                return tipc_cfg_reply_error_string("cannot change link setting");
 
@@ -2098,7 +2113,9 @@ static void link_reset_statistics(struct tipc_link *l_ptr)
        l_ptr->stats.recv_info = l_ptr->next_in_no;
 }
 
-struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_link_cmd_reset_stats(struct net *net,
+                                         const void *req_tlv_area,
+                                         int req_tlv_space)
 {
        char *link_name;
        struct tipc_link *l_ptr;
@@ -2110,11 +2127,11 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
 
        link_name = (char *)TLV_DATA(req_tlv_area);
        if (!strcmp(link_name, tipc_bclink_name)) {
-               if (tipc_bclink_reset_stats())
+               if (tipc_bclink_reset_stats(net))
                        return tipc_cfg_reply_error_string("link not found");
                return tipc_cfg_reply_none();
        }
-       node = tipc_link_find_owner(link_name, &bearer_id);
+       node = tipc_link_find_owner(net, link_name, &bearer_id);
        if (!node)
                return tipc_cfg_reply_error_string("link not found");
 
@@ -2139,13 +2156,15 @@ static u32 percent(u32 count, u32 total)
 
 /**
  * tipc_link_stats - print link statistics
+ * @net: the applicable net namespace
  * @name: link name
  * @buf: print buffer area
  * @buf_size: size of print buffer area
  *
  * Returns length of print buffer data string (or 0 if error)
  */
-static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
+static int tipc_link_stats(struct net *net, const char *name, char *buf,
+                          const u32 buf_size)
 {
        struct tipc_link *l;
        struct tipc_stats *s;
@@ -2156,9 +2175,9 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
        int ret;
 
        if (!strcmp(name, tipc_bclink_name))
-               return tipc_bclink_stats(buf, buf_size);
+               return tipc_bclink_stats(net, buf, buf_size);
 
-       node = tipc_link_find_owner(name, &bearer_id);
+       node = tipc_link_find_owner(net, name, &bearer_id);
        if (!node)
                return 0;
 
@@ -2235,7 +2254,9 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
        return ret;
 }
 
-struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_link_cmd_show_stats(struct net *net,
+                                        const void *req_tlv_area,
+                                        int req_tlv_space)
 {
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
@@ -2253,7 +2274,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
        rep_tlv = (struct tlv_desc *)buf->data;
        pb = TLV_DATA(rep_tlv);
        pb_len = ULTRA_STRING_MAX_LEN;
-       str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
+       str_len = tipc_link_stats(net, (char *)TLV_DATA(req_tlv_area),
                                  pb, pb_len);
        if (!str_len) {
                kfree_skb(buf);
@@ -2266,39 +2287,13 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
        return buf;
 }
 
-/**
- * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
- * @dest: network address of destination node
- * @selector: used to select from set of active links
- *
- * If no active link can be found, uses default maximum packet size.
- */
-u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
-{
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
-       u32 res = MAX_PKT_DEFAULT;
-
-       if (dest == tipc_own_addr)
-               return MAX_MSG_SIZE;
-
-       n_ptr = tipc_node_find(dest);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[selector & 1];
-               if (l_ptr)
-                       res = l_ptr->max_pkt;
-               tipc_node_unlock(n_ptr);
-       }
-       return res;
-}
-
 static void link_print(struct tipc_link *l_ptr, const char *str)
 {
+       struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
        struct tipc_bearer *b_ptr;
 
        rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+       b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
        if (b_ptr)
                pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
        rcu_read_unlock();
@@ -2362,6 +2357,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
        struct tipc_link *link;
        struct tipc_node *node;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;
@@ -2377,7 +2373,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 
        name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
 
-       node = tipc_link_find_owner(name, &bearer_id);
+       node = tipc_link_find_owner(net, name, &bearer_id);
        if (!node)
                return -EINVAL;
 
@@ -2493,12 +2489,14 @@ msg_full:
 }
 
 /* Caller should hold appropriate locks to protect the link */
-static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
+static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
+                             struct tipc_link *link)
 {
        int err;
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *prop;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
                          NLM_F_MULTI, TIPC_NL_LINK_GET);
@@ -2512,7 +2510,7 @@ static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
        if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
-                       tipc_cluster_mask(tipc_own_addr)))
+                       tipc_cluster_mask(tn->own_addr)))
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
                goto attr_msg_full;
@@ -2562,9 +2560,8 @@ msg_full:
 }
 
 /* Caller should hold node lock  */
-static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
-                                   struct tipc_node *node,
-                                   u32 *prev_link)
+static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
+                                   struct tipc_node *node, u32 *prev_link)
 {
        u32 i;
        int err;
@@ -2575,7 +2572,7 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
                if (!node->links[i])
                        continue;
 
-               err = __tipc_nl_add_link(msg, node->links[i]);
+               err = __tipc_nl_add_link(net, msg, node->links[i]);
                if (err)
                        return err;
        }
@@ -2586,6 +2583,8 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
 
 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node;
        struct tipc_nl_msg msg;
        u32 prev_node = cb->args[0];
@@ -2603,7 +2602,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
 
        if (prev_node) {
-               node = tipc_node_find(prev_node);
+               node = tipc_node_find(net, prev_node);
                if (!node) {
                        /* We never set seq or call nl_dump_check_consistent()
                         * this means that setting prev_seq here will cause the
@@ -2615,9 +2614,11 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        goto out;
                }
 
-               list_for_each_entry_continue_rcu(node, &tipc_node_list, list) {
+               list_for_each_entry_continue_rcu(node, &tn->node_list,
+                                                list) {
                        tipc_node_lock(node);
-                       err = __tipc_nl_add_node_links(&msg, node, &prev_link);
+                       err = __tipc_nl_add_node_links(net, &msg, node,
+                                                      &prev_link);
                        tipc_node_unlock(node);
                        if (err)
                                goto out;
@@ -2625,13 +2626,14 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        prev_node = node->addr;
                }
        } else {
-               err = tipc_nl_add_bc_link(&msg);
+               err = tipc_nl_add_bc_link(net, &msg);
                if (err)
                        goto out;
 
-               list_for_each_entry_rcu(node, &tipc_node_list, list) {
+               list_for_each_entry_rcu(node, &tn->node_list, list) {
                        tipc_node_lock(node);
-                       err = __tipc_nl_add_node_links(&msg, node, &prev_link);
+                       err = __tipc_nl_add_node_links(net, &msg, node,
+                                                      &prev_link);
                        tipc_node_unlock(node);
                        if (err)
                                goto out;
@@ -2652,6 +2654,7 @@ out:
 
 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
 {
+       struct net *net = genl_info_net(info);
        struct sk_buff *ans_skb;
        struct tipc_nl_msg msg;
        struct tipc_link *link;
@@ -2664,7 +2667,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
 
        name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
-       node = tipc_link_find_owner(name, &bearer_id);
+       node = tipc_link_find_owner(net, name, &bearer_id);
        if (!node)
                return -EINVAL;
 
@@ -2683,7 +2686,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
                goto err_out;
        }
 
-       err = __tipc_nl_add_link(&msg, link);
+       err = __tipc_nl_add_link(net, &msg, link);
        if (err)
                goto err_out;
 
@@ -2706,6 +2709,7 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
        struct tipc_link *link;
        struct tipc_node *node;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+       struct net *net = genl_info_net(info);
 
        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;
@@ -2722,13 +2726,13 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
        link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
 
        if (strcmp(link_name, tipc_bclink_name) == 0) {
-               err = tipc_bclink_reset_stats();
+               err = tipc_bclink_reset_stats(net);
                if (err)
                        return err;
                return 0;
        }
 
-       node = tipc_link_find_owner(link_name, &bearer_id);
+       node = tipc_link_find_owner(net, link_name, &bearer_id);
        if (!node)
                return -EINVAL;
 
index 55812e87ca1e2a4b6cb460e067599edc9bbf158d..34d3f55c4ceabfb23246eb9eef6b2966441916a4 100644 (file)
 #include "msg.h"
 #include "node.h"
 
+/* TIPC-specific error codes
+*/
+#define ELINKCONG EAGAIN       /* link congestion <=> resource unavailable */
+
 /* Out-of-range value for link sequence numbers
  */
 #define INVALID_LINK_SEQ 0x10000
@@ -99,13 +103,14 @@ struct tipc_stats {
  * @media_addr: media address to use when sending messages over link
  * @timer: link timer
  * @owner: pointer to peer node
+ * @refcnt: reference counter for permanent references (owner node & timer)
  * @flags: execution state flags for link endpoint instance
  * @checkpoint: reference point for triggering link continuity checking
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
  * @bearer_id: local bearer id used by link
  * @tolerance: minimum link continuity loss needed to reset link [in ms]
- * @continuity_interval: link continuity testing interval [in ms]
+ * @cont_intv: link continuity testing interval
  * @abort_limit: # of unacknowledged continuity probes needed to reset link
  * @state: current state of link FSM
  * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
@@ -126,8 +131,10 @@ struct tipc_stats {
  * @next_in_no: next sequence number to expect for inbound messages
  * @deferred_queue: deferred queue saved OOS b'cast message received from node
  * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
+ * @inputq: buffer queue for messages to be delivered upwards
+ * @namedq: buffer queue for name table messages to be delivered upwards
  * @next_out: ptr to first unsent outbound message in queue
- * @waiting_sks: linked list of sockets waiting for link congestion to abate
+ * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
  * @reasm_buf: head of partially reassembled inbound message fragments
  * @stats: collects statistics regarding link activity
@@ -138,6 +145,7 @@ struct tipc_link {
        struct tipc_media_addr media_addr;
        struct timer_list timer;
        struct tipc_node *owner;
+       struct kref ref;
 
        /* Management and link supervision data */
        unsigned int flags;
@@ -146,7 +154,7 @@ struct tipc_link {
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
-       u32 continuity_interval;
+       unsigned long cont_intv;
        u32 abort_limit;
        int state;
        u32 fsm_msg_cnt;
@@ -178,10 +186,12 @@ struct tipc_link {
        u32 next_in_no;
        struct sk_buff_head deferred_queue;
        u32 unacked_window;
+       struct sk_buff_head inputq;
+       struct sk_buff_head namedq;
 
        /* Congestion handling */
        struct sk_buff *next_out;
-       struct sk_buff_head waiting_sks;
+       struct sk_buff_head wakeupq;
 
        /* Fragmentation/reassembly */
        u32 long_msg_seq_no;
@@ -196,28 +206,32 @@ struct tipc_port;
 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                              struct tipc_bearer *b_ptr,
                              const struct tipc_media_addr *media_addr);
-void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down);
+void tipc_link_delete(struct tipc_link *link);
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
+                          bool shutting_down);
 void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
 void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
 int tipc_link_is_up(struct tipc_link *l_ptr);
 int tipc_link_is_active(struct tipc_link *l_ptr);
 void tipc_link_purge_queues(struct tipc_link *l_ptr);
-struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area,
-                                    int req_tlv_space,
-                                    u16 cmd);
-struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
+struct sk_buff *tipc_link_cmd_config(struct net *net, const void *req_tlv_area,
+                                    int req_tlv_space, u16 cmd);
+struct sk_buff *tipc_link_cmd_show_stats(struct net *net,
+                                        const void *req_tlv_area,
                                         int req_tlv_space);
-struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
+struct sk_buff *tipc_link_cmd_reset_stats(struct net *net,
+                                         const void *req_tlv_area,
                                          int req_tlv_space);
 void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
-void tipc_link_reset_list(unsigned int bearer_id);
-int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector);
-int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector);
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list);
-u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-void tipc_link_bundle_rcv(struct sk_buff *buf);
+void tipc_link_reset_list(struct net *net, unsigned int bearer_id);
+int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+                      u32 selector);
+int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
+                  u32 selector);
+int __tipc_link_xmit(struct net *net, struct tipc_link *link,
+                    struct sk_buff_head *list);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
                          u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
 void tipc_link_push_packets(struct tipc_link *l_ptr);
@@ -233,6 +247,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
+void link_prepare_wakeup(struct tipc_link *l);
 
 /*
  * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
@@ -267,6 +282,10 @@ static inline u32 lesser(u32 left, u32 right)
        return less_eq(left, right) ? left : right;
 }
 
+static inline u32 link_own_addr(struct tipc_link *l)
+{
+       return msg_prevnode(l->pmsg);
+}
 
 /*
  * Link status checking routines
index a687b30a699cb651eaf7dd5f1c7d5fa3459bf9b5..b6eb90cd3ef7053ffe8d73143a53b8021e2770d3 100644 (file)
@@ -34,6 +34,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <net/sock.h>
 #include "core.h"
 #include "msg.h"
 #include "addr.h"
@@ -46,25 +47,48 @@ static unsigned int align(unsigned int i)
        return (i + 3) & ~3u;
 }
 
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
-                  u32 destnode)
+/**
+ * tipc_buf_acquire - creates a TIPC message buffer
+ * @size: message size (including TIPC header)
+ *
+ * Returns a new buffer with data pointers set to the specified size.
+ *
+ * NOTE: Headroom is reserved to allow prepending of a data link header.
+ *       There may also be unrequested tailroom present at the buffer's end.
+ */
+struct sk_buff *tipc_buf_acquire(u32 size)
+{
+       struct sk_buff *skb;
+       unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+
+       skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
+       if (skb) {
+               skb_reserve(skb, BUF_HEADROOM);
+               skb_put(skb, size);
+               skb->next = NULL;
+       }
+       return skb;
+}
+
+void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
+                  u32 hsize, u32 dnode)
 {
        memset(m, 0, hsize);
        msg_set_version(m);
        msg_set_user(m, user);
        msg_set_hdr_sz(m, hsize);
        msg_set_size(m, hsize);
-       msg_set_prevnode(m, tipc_own_addr);
+       msg_set_prevnode(m, own_node);
        msg_set_type(m, type);
        if (hsize > SHORT_H_SIZE) {
-               msg_set_orignode(m, tipc_own_addr);
-               msg_set_destnode(m, destnode);
+               msg_set_orignode(m, own_node);
+               msg_set_destnode(m, dnode);
        }
 }
 
-struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
-                               uint data_sz, u32 dnode, u32 onode,
-                               u32 dport, u32 oport, int errcode)
+struct sk_buff *tipc_msg_create(uint user, uint type,
+                               uint hdr_sz, uint data_sz, u32 dnode,
+                               u32 onode, u32 dport, u32 oport, int errcode)
 {
        struct tipc_msg *msg;
        struct sk_buff *buf;
@@ -74,9 +98,8 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
                return NULL;
 
        msg = buf_msg(buf);
-       tipc_msg_init(msg, user, type, hdr_sz, dnode);
+       tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
        msg_set_size(msg, hdr_sz + data_sz);
-       msg_set_prevnode(msg, onode);
        msg_set_origport(msg, oport);
        msg_set_destport(msg, dport);
        msg_set_errcode(msg, errcode);
@@ -163,15 +186,14 @@ err:
  * tipc_msg_build - create buffer chain containing specified header and data
  * @mhdr: Message header, to be prepended to data
  * @m: User message
- * @offset: Posision in iov to start copying from
  * @dsz: Total length of user data
  * @pktmax: Max packet size that can be used
  * @list: Buffer or chain of buffers to be returned to caller
  *
  * Returns message data size or errno: -ENOMEM, -EFAULT
  */
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
-                  int dsz, int pktmax, struct sk_buff_head *list)
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
+                  int offset, int dsz, int pktmax, struct sk_buff_head *list)
 {
        int mhsz = msg_hdr_sz(mhdr);
        int msz = mhsz + dsz;
@@ -191,19 +213,19 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                skb = tipc_buf_acquire(msz);
                if (unlikely(!skb))
                        return -ENOMEM;
+               skb_orphan(skb);
                __skb_queue_tail(list, skb);
                skb_copy_to_linear_data(skb, mhdr, mhsz);
                pktpos = skb->data + mhsz;
-               if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
-                                                dsz))
+               if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
                        return dsz;
                rc = -EFAULT;
                goto error;
        }
 
        /* Prepare reusable fragment header */
-       tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
-                     INT_H_SIZE, msg_destnode(mhdr));
+       tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
+                     FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
        msg_set_size(&pkthdr, pktmax);
        msg_set_fragm_no(&pkthdr, pktno);
 
@@ -211,6 +233,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
        skb = tipc_buf_acquire(pktmax);
        if (!skb)
                return -ENOMEM;
+       skb_orphan(skb);
        __skb_queue_tail(list, skb);
        pktpos = skb->data;
        skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
@@ -224,12 +247,11 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                if (drem < pktrem)
                        pktrem = drem;
 
-               if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
+               if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
                        rc = -EFAULT;
                        goto error;
                }
                drem -= pktrem;
-               offset += pktrem;
 
                if (!drem)
                        break;
@@ -244,6 +266,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                        rc = -ENOMEM;
                        goto error;
                }
+               skb_orphan(skb);
                __skb_queue_tail(list, skb);
                msg_set_type(&pkthdr, FRAGMENT);
                msg_set_size(&pkthdr, pktsz);
@@ -303,6 +326,40 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
        return true;
 }
 
+/**
+ *  tipc_msg_extract(): extract bundled inner packet from buffer
+ *  @skb: linear outer buffer, to be extracted from.
+ *  @iskb: extracted inner buffer, to be returned
+ *  @pos: position of msg to be extracted. Returns with pointer of next msg
+ *  Consumes outer buffer when last packet extracted
+ *  Returns true when when there is an extracted buffer, otherwise false
+ */
+bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
+{
+       struct tipc_msg *msg = buf_msg(skb);
+       int imsz;
+       struct tipc_msg *imsg = (struct tipc_msg *)(msg_data(msg) + *pos);
+
+       /* Is there space left for shortest possible message? */
+       if (*pos > (msg_data_sz(msg) - SHORT_H_SIZE))
+               goto none;
+       imsz = msg_size(imsg);
+
+       /* Is there space left for current message ? */
+       if ((*pos + imsz) > msg_data_sz(msg))
+               goto none;
+       *iskb = tipc_buf_acquire(imsz);
+       if (!*iskb)
+               goto none;
+       skb_copy_to_linear_data(*iskb, imsg, imsz);
+       *pos += align(imsz);
+       return true;
+none:
+       kfree_skb(skb);
+       *iskb = NULL;
+       return false;
+}
+
 /**
  * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
  * @list: the buffer chain
@@ -312,8 +369,8 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
  * Replaces buffer if successful
  * Returns true if success, otherwise false
  */
-bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
-                         u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff_head *list,
+                         struct sk_buff *skb, u32 mtu, u32 dnode)
 {
        struct sk_buff *bskb;
        struct tipc_msg *bmsg;
@@ -336,7 +393,8 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
 
        skb_trim(bskb, INT_H_SIZE);
        bmsg = buf_msg(bskb);
-       tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
+       tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
+                     INT_H_SIZE, dnode);
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
@@ -353,7 +411,8 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
  * Consumes buffer if failure
  * Returns true if success, otherwise false
  */
-bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
+bool tipc_msg_reverse(u32 own_addr,  struct sk_buff *buf, u32 *dnode,
+                     int err)
 {
        struct tipc_msg *msg = buf_msg(buf);
        uint imp = msg_importance(msg);
@@ -374,7 +433,7 @@ bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
        msg_set_errcode(msg, err);
        msg_set_origport(msg, msg_destport(&ohdr));
        msg_set_destport(msg, msg_origport(&ohdr));
-       msg_set_prevnode(msg, tipc_own_addr);
+       msg_set_prevnode(msg, own_addr);
        if (!msg_short(msg)) {
                msg_set_orignode(msg, msg_destnode(&ohdr));
                msg_set_destnode(msg, msg_orignode(&ohdr));
@@ -386,43 +445,43 @@ bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
        return true;
 exit:
        kfree_skb(buf);
+       *dnode = 0;
        return false;
 }
 
 /**
- * tipc_msg_eval: determine fate of message that found no destination
- * @buf: the buffer containing the message.
- * @dnode: return value: next-hop node, if message to be forwarded
- * @err: error code to use, if message to be rejected
- *
+ * tipc_msg_lookup_dest(): try to find new destination for named message
+ * @skb: the buffer containing the message.
+ * @dnode: return value: next-hop node, if destination found
+ * @err: return value: error code to use, if message to be rejected
  * Does not consume buffer
- * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error
- * code if message to be rejected
+ * Returns true if a destination is found, false otherwise
  */
-int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
+                         u32 *dnode, int *err)
 {
-       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_msg *msg = buf_msg(skb);
        u32 dport;
 
-       if (msg_type(msg) != TIPC_NAMED_MSG)
-               return -TIPC_ERR_NO_PORT;
-       if (skb_linearize(buf))
-               return -TIPC_ERR_NO_NAME;
-       if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
-               return -TIPC_ERR_NO_NAME;
+       if (!msg_isdata(msg))
+               return false;
+       if (!msg_named(msg))
+               return false;
+       *err = -TIPC_ERR_NO_NAME;
+       if (skb_linearize(skb))
+               return false;
        if (msg_reroute_cnt(msg) > 0)
-               return -TIPC_ERR_NO_NAME;
-
-       *dnode = addr_domain(msg_lookup_scope(msg));
-       dport = tipc_nametbl_translate(msg_nametype(msg),
-                                      msg_nameinst(msg),
-                                      dnode);
+               return false;
+       *dnode = addr_domain(net, msg_lookup_scope(msg));
+       dport = tipc_nametbl_translate(net, msg_nametype(msg),
+                                      msg_nameinst(msg), dnode);
        if (!dport)
-               return -TIPC_ERR_NO_NAME;
+               return false;
        msg_incr_reroute_cnt(msg);
        msg_set_destnode(msg, *dnode);
        msg_set_destport(msg, dport);
-       return TIPC_OK;
+       *err = TIPC_OK;
+       return true;
 }
 
 /* tipc_msg_reassemble() - clone a buffer chain of fragments and
index d5c83d7ecb479f30de9fe9b8f8aadbac5aa0c64b..9ace47f44a69ed3000c6739aee55fbf30b04081c 100644 (file)
@@ -37,7 +37,7 @@
 #ifndef _TIPC_MSG_H
 #define _TIPC_MSG_H
 
-#include "bearer.h"
+#include <linux/tipc.h>
 
 /*
  * Constants and routines used to read and write TIPC payload message headers
@@ -45,6 +45,7 @@
  * Note: Some items are also used with TIPC internal message headers
  */
 #define TIPC_VERSION              2
+struct plist;
 
 /*
  * Payload message users are defined in TIPC's public API:
 
 #define TIPC_MEDIA_ADDR_OFFSET 5
 
+/**
+ * TIPC message buffer code
+ *
+ * TIPC message buffer headroom reserves space for the worst-case
+ * link-level device header (in case the message is sent off-node).
+ *
+ * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
+ *       are word aligned for quicker access
+ */
+#define BUF_HEADROOM LL_MAX_HEADER
+
+struct tipc_skb_cb {
+       void *handle;
+       struct sk_buff *tail;
+       bool deferred;
+       bool wakeup_pending;
+       bool bundling;
+       u16 chain_sz;
+       u16 chain_imp;
+};
+
+#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
 
 struct tipc_msg {
        __be32 hdr[15];
 };
 
+static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
+{
+       return (struct tipc_msg *)skb->data;
+}
 
 static inline u32 msg_word(struct tipc_msg *m, u32 pos)
 {
@@ -721,27 +748,111 @@ static inline u32 msg_tot_origport(struct tipc_msg *m)
        return msg_origport(m);
 }
 
-bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err);
-
-int tipc_msg_eval(struct sk_buff *buf, u32 *dnode);
-
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
-                  u32 destnode);
-
+struct sk_buff *tipc_buf_acquire(u32 size);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
+                     int err);
+void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
+                  u32 hsize, u32 destnode);
 struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
                                uint data_sz, u32 dnode, u32 onode,
                                u32 dport, u32 oport, int errcode);
-
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-
 bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
+bool tipc_msg_make_bundle(struct sk_buff_head *list,
+                         struct sk_buff *skb, u32 mtu, u32 dnode);
+bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
+                  int offset, int dsz, int mtu, struct sk_buff_head *list);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
+                         int *err);
+struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
-bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
-                         u32 mtu, u32 dnode);
-
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
-                  int dsz, int mtu, struct sk_buff_head *list);
+/* tipc_skb_peek(): peek and reserve first buffer in list
+ * @list: list to be peeked in
+ * Returns pointer to first buffer in list, if any
+ */
+static inline struct sk_buff *tipc_skb_peek(struct sk_buff_head *list,
+                                           spinlock_t *lock)
+{
+       struct sk_buff *skb;
+
+       spin_lock_bh(lock);
+       skb = skb_peek(list);
+       if (skb)
+               skb_get(skb);
+       spin_unlock_bh(lock);
+       return skb;
+}
+
+/* tipc_skb_peek_port(): find a destination port, ignoring all destinations
+ *                       up to and including 'filter'.
+ * Note: ignoring previously tried destinations minimizes the risk of
+ *       contention on the socket lock
+ * @list: list to be peeked in
+ * @filter: last destination to be ignored from search
+ * Returns a destination port number, of applicable.
+ */
+static inline u32 tipc_skb_peek_port(struct sk_buff_head *list, u32 filter)
+{
+       struct sk_buff *skb;
+       u32 dport = 0;
+       bool ignore = true;
+
+       spin_lock_bh(&list->lock);
+       skb_queue_walk(list, skb) {
+               dport = msg_destport(buf_msg(skb));
+               if (!filter || skb_queue_is_last(list, skb))
+                       break;
+               if (dport == filter)
+                       ignore = false;
+               else if (!ignore)
+                       break;
+       }
+       spin_unlock_bh(&list->lock);
+       return dport;
+}
+
+/* tipc_skb_dequeue(): unlink first buffer with dest 'dport' from list
+ * @list: list to be unlinked from
+ * @dport: selection criteria for buffer to unlink
+ */
+static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
+                                              u32 dport)
+{
+       struct sk_buff *_skb, *tmp, *skb = NULL;
+
+       spin_lock_bh(&list->lock);
+       skb_queue_walk_safe(list, _skb, tmp) {
+               if (msg_destport(buf_msg(_skb)) == dport) {
+                       __skb_unlink(_skb, list);
+                       skb = _skb;
+                       break;
+               }
+       }
+       spin_unlock_bh(&list->lock);
+       return skb;
+}
+
+/* tipc_skb_queue_tail(): add buffer to tail of list;
+ * @list: list to be appended to
+ * @skb: buffer to append. Always appended
+ * @dport: the destination port of the buffer
+ * returns true if dport differs from previous destination
+ */
+static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
+                                      struct sk_buff *skb, u32 dport)
+{
+       struct sk_buff *_skb = NULL;
+       bool rv = false;
 
-struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
+       spin_lock_bh(&list->lock);
+       _skb = skb_peek_tail(list);
+       if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
+           (skb_queue_len(list) > 32))
+               rv = true;
+       __skb_queue_tail(list, skb);
+       spin_unlock_bh(&list->lock);
+       return rv;
+}
 
 #endif
index ba6083dca95b97d61d73a4d1a1df13c31a9e728c..fcb07915aaacc244bedc54f660b774c2550b85a5 100644 (file)
@@ -68,29 +68,33 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
 /**
  * named_prepare_buf - allocate & initialize a publication message
  */
-static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
+static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
+                                        u32 dest)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
        struct tipc_msg *msg;
 
        if (buf != NULL) {
                msg = buf_msg(buf);
-               tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
+               tipc_msg_init(tn->own_addr, msg, NAME_DISTRIBUTOR, type,
+                             INT_H_SIZE, dest);
                msg_set_size(msg, INT_H_SIZE + size);
        }
        return buf;
 }
 
-void named_cluster_distribute(struct sk_buff *skb)
+void named_cluster_distribute(struct net *net, struct sk_buff *skb)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *oskb;
        struct tipc_node *node;
        u32 dnode;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(node, &tipc_node_list, list) {
+       list_for_each_entry_rcu(node, &tn->node_list, list) {
                dnode = node->addr;
-               if (in_own_node(dnode))
+               if (in_own_node(net, dnode))
                        continue;
                if (!tipc_node_active_links(node))
                        continue;
@@ -98,7 +102,7 @@ void named_cluster_distribute(struct sk_buff *skb)
                if (!oskb)
                        break;
                msg_set_destnode(buf_msg(oskb), dnode);
-               tipc_link_xmit_skb(oskb, dnode, dnode);
+               tipc_link_xmit_skb(net, oskb, dnode, dnode);
        }
        rcu_read_unlock();
 
@@ -108,18 +112,19 @@ void named_cluster_distribute(struct sk_buff *skb)
 /**
  * tipc_named_publish - tell other nodes about a new publication by this node
  */
-struct sk_buff *tipc_named_publish(struct publication *publ)
+struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff *buf;
        struct distr_item *item;
 
        list_add_tail_rcu(&publ->local_list,
-                         &tipc_nametbl->publ_list[publ->scope]);
+                         &tn->nametbl->publ_list[publ->scope]);
 
        if (publ->scope == TIPC_NODE_SCOPE)
                return NULL;
 
-       buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
+       buf = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
        if (!buf) {
                pr_warn("Publication distribution failure\n");
                return NULL;
@@ -133,7 +138,7 @@ struct sk_buff *tipc_named_publish(struct publication *publ)
 /**
  * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
  */
-struct sk_buff *tipc_named_withdraw(struct publication *publ)
+struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
 {
        struct sk_buff *buf;
        struct distr_item *item;
@@ -143,7 +148,7 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
        if (publ->scope == TIPC_NODE_SCOPE)
                return NULL;
 
-       buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
+       buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
        if (!buf) {
                pr_warn("Withdrawal distribution failure\n");
                return NULL;
@@ -160,19 +165,21 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
  * @dnode: node to be updated
  * @pls: linked list of publication items to be packed into buffer chain
  */
-static void named_distribute(struct sk_buff_head *list, u32 dnode,
-                            struct list_head *pls)
+static void named_distribute(struct net *net, struct sk_buff_head *list,
+                            u32 dnode, struct list_head *pls)
 {
        struct publication *publ;
        struct sk_buff *skb = NULL;
        struct distr_item *item = NULL;
-       uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
+       uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
+                       ITEM_SIZE;
        uint msg_rem = msg_dsz;
 
        list_for_each_entry(publ, pls, local_list) {
                /* Prepare next buffer: */
                if (!skb) {
-                       skb = named_prepare_buf(PUBLICATION, msg_rem, dnode);
+                       skb = named_prepare_buf(net, PUBLICATION, msg_rem,
+                                               dnode);
                        if (!skb) {
                                pr_warn("Bulk publication failure\n");
                                return;
@@ -202,30 +209,32 @@ static void named_distribute(struct sk_buff_head *list, u32 dnode,
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
-void tipc_named_node_up(u32 dnode)
+void tipc_named_node_up(struct net *net, u32 dnode)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sk_buff_head head;
 
        __skb_queue_head_init(&head);
 
        rcu_read_lock();
-       named_distribute(&head, dnode,
-                        &tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
-       named_distribute(&head, dnode,
-                        &tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
+       named_distribute(net, &head, dnode,
+                        &tn->nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
+       named_distribute(net, &head, dnode,
+                        &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
        rcu_read_unlock();
 
-       tipc_link_xmit(&head, dnode, dnode);
+       tipc_link_xmit(net, &head, dnode, dnode);
 }
 
-static void tipc_publ_subscribe(struct publication *publ, u32 addr)
+static void tipc_publ_subscribe(struct net *net, struct publication *publ,
+                               u32 addr)
 {
        struct tipc_node *node;
 
-       if (in_own_node(addr))
+       if (in_own_node(net, addr))
                return;
 
-       node = tipc_node_find(addr);
+       node = tipc_node_find(net, addr);
        if (!node) {
                pr_warn("Node subscription rejected, unknown node 0x%x\n",
                        addr);
@@ -237,11 +246,12 @@ static void tipc_publ_subscribe(struct publication *publ, u32 addr)
        tipc_node_unlock(node);
 }
 
-static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
+static void tipc_publ_unsubscribe(struct net *net, struct publication *publ,
+                                 u32 addr)
 {
        struct tipc_node *node;
 
-       node = tipc_node_find(addr);
+       node = tipc_node_find(net, addr);
        if (!node)
                return;
 
@@ -256,16 +266,17 @@ static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
  * Invoked for each publication issued by a newly failed node.
  * Removes publication structure from name table & deletes it.
  */
-static void tipc_publ_purge(struct publication *publ, u32 addr)
+static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct publication *p;
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       p = tipc_nametbl_remove_publ(publ->type, publ->lower,
+       spin_lock_bh(&tn->nametbl_lock);
+       p = tipc_nametbl_remove_publ(net, publ->type, publ->lower,
                                     publ->node, publ->ref, publ->key);
        if (p)
-               tipc_publ_unsubscribe(p, addr);
-       spin_unlock_bh(&tipc_nametbl_lock);
+               tipc_publ_unsubscribe(net, p, addr);
+       spin_unlock_bh(&tn->nametbl_lock);
 
        if (p != publ) {
                pr_err("Unable to remove publication from failed node\n"
@@ -277,12 +288,12 @@ static void tipc_publ_purge(struct publication *publ, u32 addr)
        kfree_rcu(p, rcu);
 }
 
-void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
 {
        struct publication *publ, *tmp;
 
        list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
-               tipc_publ_purge(publ, addr);
+               tipc_publ_purge(net, publ, addr);
 }
 
 /**
@@ -292,25 +303,28 @@ void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
  * tipc_nametbl_lock must be held.
  * Returns the publication item if successful, otherwise NULL.
  */
-static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
+static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
+                               u32 node, u32 dtype)
 {
        struct publication *publ = NULL;
 
        if (dtype == PUBLICATION) {
-               publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower),
+               publ = tipc_nametbl_insert_publ(net, ntohl(i->type),
+                                               ntohl(i->lower),
                                                ntohl(i->upper),
                                                TIPC_CLUSTER_SCOPE, node,
                                                ntohl(i->ref), ntohl(i->key));
                if (publ) {
-                       tipc_publ_subscribe(publ, node);
+                       tipc_publ_subscribe(net, publ, node);
                        return true;
                }
        } else if (dtype == WITHDRAWAL) {
-               publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower),
+               publ = tipc_nametbl_remove_publ(net, ntohl(i->type),
+                                               ntohl(i->lower),
                                                node, ntohl(i->ref),
                                                ntohl(i->key));
                if (publ) {
-                       tipc_publ_unsubscribe(publ, node);
+                       tipc_publ_unsubscribe(net, publ, node);
                        kfree_rcu(publ, rcu);
                        return true;
                }
@@ -343,7 +357,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
  * tipc_named_process_backlog - try to process any pending name table updates
  * from the network.
  */
-void tipc_named_process_backlog(void)
+void tipc_named_process_backlog(struct net *net)
 {
        struct distr_queue_item *e, *tmp;
        char addr[16];
@@ -351,7 +365,7 @@ void tipc_named_process_backlog(void)
 
        list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
                if (time_after(e->expires, now)) {
-                       if (!tipc_update_nametbl(&e->i, e->node, e->dtype))
+                       if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
                                continue;
                } else {
                        tipc_addr_string_fill(addr, e->node);
@@ -367,24 +381,34 @@ void tipc_named_process_backlog(void)
 }
 
 /**
- * tipc_named_rcv - process name table update message sent by another node
+ * tipc_named_rcv - process name table update messages sent by another node
  */
-void tipc_named_rcv(struct sk_buff *buf)
+void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
 {
-       struct tipc_msg *msg = buf_msg(buf);
-       struct distr_item *item = (struct distr_item *)msg_data(msg);
-       u32 count = msg_data_sz(msg) / ITEM_SIZE;
-       u32 node = msg_orignode(msg);
-
-       spin_lock_bh(&tipc_nametbl_lock);
-       while (count--) {
-               if (!tipc_update_nametbl(item, node, msg_type(msg)))
-                       tipc_named_add_backlog(item, msg_type(msg), node);
-               item++;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_msg *msg;
+       struct distr_item *item;
+       uint count;
+       u32 node;
+       struct sk_buff *skb;
+       int mtype;
+
+       spin_lock_bh(&tn->nametbl_lock);
+       for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
+               msg = buf_msg(skb);
+               mtype = msg_type(msg);
+               item = (struct distr_item *)msg_data(msg);
+               count = msg_data_sz(msg) / ITEM_SIZE;
+               node = msg_orignode(msg);
+               while (count--) {
+                       if (!tipc_update_nametbl(net, item, node, mtype))
+                               tipc_named_add_backlog(item, mtype, node);
+                       item++;
+               }
+               kfree_skb(skb);
+               tipc_named_process_backlog(net);
        }
-       tipc_named_process_backlog();
-       spin_unlock_bh(&tipc_nametbl_lock);
-       kfree_skb(buf);
+       spin_unlock_bh(&tn->nametbl_lock);
 }
 
 /**
@@ -394,17 +418,18 @@ void tipc_named_rcv(struct sk_buff *buf)
  * All name table entries published by this node are updated to reflect
  * the node's new network address.
  */
-void tipc_named_reinit(void)
+void tipc_named_reinit(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct publication *publ;
        int scope;
 
-       spin_lock_bh(&tipc_nametbl_lock);
+       spin_lock_bh(&tn->nametbl_lock);
 
        for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
-               list_for_each_entry_rcu(publ, &tipc_nametbl->publ_list[scope],
+               list_for_each_entry_rcu(publ, &tn->nametbl->publ_list[scope],
                                        local_list)
-                       publ->node = tipc_own_addr;
+                       publ->node = tn->own_addr;
 
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 }
index cef55cedcfb29c76b8367486d1e28cb376118339..dd2d9fd80da2c59572c9cb8a6b74306c9fc70458 100644 (file)
@@ -67,13 +67,13 @@ struct distr_item {
        __be32 key;
 };
 
-struct sk_buff *tipc_named_publish(struct publication *publ);
-struct sk_buff *tipc_named_withdraw(struct publication *publ);
-void named_cluster_distribute(struct sk_buff *buf);
-void tipc_named_node_up(u32 dnode);
-void tipc_named_rcv(struct sk_buff *buf);
-void tipc_named_reinit(void);
-void tipc_named_process_backlog(void);
-void tipc_publ_notify(struct list_head *nsub_list, u32 addr);
+struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ);
+struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ);
+void named_cluster_distribute(struct net *net, struct sk_buff *buf);
+void tipc_named_node_up(struct net *net, u32 dnode);
+void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue);
+void tipc_named_reinit(struct net *net);
+void tipc_named_process_backlog(struct net *net);
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr);
 
 #endif
index c8df0223371a7b86f971093fee1ac1ba47bffb26..18a3d44238bcd6454569ccf287e9d9a13ee6bb6c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/name_table.c: TIPC name table code
  *
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
  * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
  * All rights reserved.
  *
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <net/sock.h>
 #include "core.h"
 #include "config.h"
 #include "name_table.h"
 #include "name_distr.h"
 #include "subscr.h"
+#include "bcast.h"
 
 #define TIPC_NAMETBL_SIZE 1024         /* must be a power of 2 */
 
@@ -105,9 +107,6 @@ struct name_seq {
        struct rcu_head rcu;
 };
 
-struct name_table *tipc_nametbl;
-DEFINE_SPINLOCK(tipc_nametbl_lock);
-
 static int hash(int x)
 {
        return x & (TIPC_NAMETBL_SIZE - 1);
@@ -228,9 +227,11 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
 /**
  * tipc_nameseq_insert_publ
  */
-static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
-                                                   u32 type, u32 lower, u32 upper,
-                                                   u32 scope, u32 node, u32 port, u32 key)
+static struct publication *tipc_nameseq_insert_publ(struct net *net,
+                                                   struct name_seq *nseq,
+                                                   u32 type, u32 lower,
+                                                   u32 upper, u32 scope,
+                                                   u32 node, u32 port, u32 key)
 {
        struct tipc_subscription *s;
        struct tipc_subscription *st;
@@ -315,12 +316,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
        list_add(&publ->zone_list, &info->zone_list);
        info->zone_list_size++;
 
-       if (in_own_cluster(node)) {
+       if (in_own_cluster(net, node)) {
                list_add(&publ->cluster_list, &info->cluster_list);
                info->cluster_list_size++;
        }
 
-       if (in_own_node(node)) {
+       if (in_own_node(net, node)) {
                list_add(&publ->node_list, &info->node_list);
                info->node_list_size++;
        }
@@ -349,8 +350,10 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
  * A failed withdraw request simply returns a failure indication and lets the
  * caller issue any error or warning messages associated with such a problem.
  */
-static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
-                                                   u32 node, u32 ref, u32 key)
+static struct publication *tipc_nameseq_remove_publ(struct net *net,
+                                                   struct name_seq *nseq,
+                                                   u32 inst, u32 node,
+                                                   u32 ref, u32 key)
 {
        struct publication *publ;
        struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
@@ -378,13 +381,13 @@ found:
        info->zone_list_size--;
 
        /* Remove publication from cluster scope list, if present */
-       if (in_own_cluster(node)) {
+       if (in_own_cluster(net, node)) {
                list_del(&publ->cluster_list);
                info->cluster_list_size--;
        }
 
        /* Remove publication from node scope list, if present */
-       if (in_own_node(node)) {
+       if (in_own_node(net, node)) {
                list_del(&publ->node_list);
                info->node_list_size--;
        }
@@ -447,12 +450,13 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
        }
 }
 
-static struct name_seq *nametbl_find_seq(u32 type)
+static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct hlist_head *seq_head;
        struct name_seq *ns;
 
-       seq_head = &tipc_nametbl->seq_hlist[hash(type)];
+       seq_head = &tn->nametbl->seq_hlist[hash(type)];
        hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
                if (ns->type == type)
                        return ns;
@@ -461,11 +465,13 @@ static struct name_seq *nametbl_find_seq(u32 type)
        return NULL;
 };
 
-struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
-                                            u32 scope, u32 node, u32 port, u32 key)
+struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
+                                            u32 lower, u32 upper, u32 scope,
+                                            u32 node, u32 port, u32 key)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct publication *publ;
-       struct name_seq *seq = nametbl_find_seq(type);
+       struct name_seq *seq = nametbl_find_seq(net, type);
        int index = hash(type);
 
        if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
@@ -476,29 +482,29 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
        }
 
        if (!seq)
-               seq = tipc_nameseq_create(type,
-                                         &tipc_nametbl->seq_hlist[index]);
+               seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
        if (!seq)
                return NULL;
 
        spin_lock_bh(&seq->lock);
-       publ = tipc_nameseq_insert_publ(seq, type, lower, upper,
+       publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
                                        scope, node, port, key);
        spin_unlock_bh(&seq->lock);
        return publ;
 }
 
-struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
-                                            u32 node, u32 ref, u32 key)
+struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
+                                            u32 lower, u32 node, u32 ref,
+                                            u32 key)
 {
        struct publication *publ;
-       struct name_seq *seq = nametbl_find_seq(type);
+       struct name_seq *seq = nametbl_find_seq(net, type);
 
        if (!seq)
                return NULL;
 
        spin_lock_bh(&seq->lock);
-       publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
+       publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
        if (!seq->first_free && list_empty(&seq->subscriptions)) {
                hlist_del_init_rcu(&seq->ns_list);
                kfree(seq->sseqs);
@@ -523,8 +529,10 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
  * - if name translation is attempted and fails, sets 'destnode' to 0
  *   and returns 0
  */
-u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
+u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
+                          u32 *destnode)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct sub_seq *sseq;
        struct name_info *info;
        struct publication *publ;
@@ -532,11 +540,11 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
        u32 ref = 0;
        u32 node = 0;
 
-       if (!tipc_in_scope(*destnode, tipc_own_addr))
+       if (!tipc_in_scope(*destnode, tn->own_addr))
                return 0;
 
        rcu_read_lock();
-       seq = nametbl_find_seq(type);
+       seq = nametbl_find_seq(net, type);
        if (unlikely(!seq))
                goto not_found;
        spin_lock_bh(&seq->lock);
@@ -569,13 +577,13 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
        }
 
        /* Round-Robin Algorithm */
-       else if (*destnode == tipc_own_addr) {
+       else if (*destnode == tn->own_addr) {
                if (list_empty(&info->node_list))
                        goto no_match;
                publ = list_first_entry(&info->node_list, struct publication,
                                        node_list);
                list_move_tail(&publ->node_list, &info->node_list);
-       } else if (in_own_cluster_exact(*destnode)) {
+       } else if (in_own_cluster_exact(net, *destnode)) {
                if (list_empty(&info->cluster_list))
                        goto no_match;
                publ = list_first_entry(&info->cluster_list, struct publication,
@@ -609,8 +617,8 @@ not_found:
  *
  * Returns non-zero if any off-node ports overlap
  */
-int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-                             struct tipc_port_list *dports)
+int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
+                             u32 limit, struct tipc_plist *dports)
 {
        struct name_seq *seq;
        struct sub_seq *sseq;
@@ -619,7 +627,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
        int res = 0;
 
        rcu_read_lock();
-       seq = nametbl_find_seq(type);
+       seq = nametbl_find_seq(net, type);
        if (!seq)
                goto exit;
 
@@ -635,7 +643,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
                info = sseq->info;
                list_for_each_entry(publ, &info->node_list, node_list) {
                        if (publ->scope <= limit)
-                               tipc_port_list_add(dports, publ->ref);
+                               tipc_plist_push(dports, publ->ref);
                }
 
                if (info->cluster_list_size != info->node_list_size)
@@ -650,50 +658,55 @@ exit:
 /*
  * tipc_nametbl_publish - add name publication to network name tables
  */
-struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
-                                        u32 scope, u32 port_ref, u32 key)
+struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
+                                        u32 upper, u32 scope, u32 port_ref,
+                                        u32 key)
 {
        struct publication *publ;
        struct sk_buff *buf = NULL;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       if (tipc_nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
+       spin_lock_bh(&tn->nametbl_lock);
+       if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
                pr_warn("Publication failed, local publication limit reached (%u)\n",
                        TIPC_MAX_PUBLICATIONS);
-               spin_unlock_bh(&tipc_nametbl_lock);
+               spin_unlock_bh(&tn->nametbl_lock);
                return NULL;
        }
 
-       publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
-                                  tipc_own_addr, port_ref, key);
+       publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
+                                       tn->own_addr, port_ref, key);
        if (likely(publ)) {
-               tipc_nametbl->local_publ_count++;
-               buf = tipc_named_publish(publ);
+               tn->nametbl->local_publ_count++;
+               buf = tipc_named_publish(net, publ);
                /* Any pending external events? */
-               tipc_named_process_backlog();
+               tipc_named_process_backlog(net);
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 
        if (buf)
-               named_cluster_distribute(buf);
+               named_cluster_distribute(net, buf);
        return publ;
 }
 
 /**
  * tipc_nametbl_withdraw - withdraw name publication from network name tables
  */
-int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
+int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
+                         u32 key)
 {
        struct publication *publ;
        struct sk_buff *skb = NULL;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
+       spin_lock_bh(&tn->nametbl_lock);
+       publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
+                                       ref, key);
        if (likely(publ)) {
-               tipc_nametbl->local_publ_count--;
-               skb = tipc_named_withdraw(publ);
+               tn->nametbl->local_publ_count--;
+               skb = tipc_named_withdraw(net, publ);
                /* Any pending external events? */
-               tipc_named_process_backlog();
+               tipc_named_process_backlog(net);
                list_del_init(&publ->pport_list);
                kfree_rcu(publ, rcu);
        } else {
@@ -701,10 +714,10 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
                       "(type=%u, lower=%u, ref=%u, key=%u)\n",
                       type, lower, ref, key);
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 
        if (skb) {
-               named_cluster_distribute(skb);
+               named_cluster_distribute(net, skb);
                return 1;
        }
        return 0;
@@ -715,15 +728,15 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
  */
 void tipc_nametbl_subscribe(struct tipc_subscription *s)
 {
+       struct tipc_net *tn = net_generic(s->net, tipc_net_id);
        u32 type = s->seq.type;
        int index = hash(type);
        struct name_seq *seq;
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       seq = nametbl_find_seq(type);
+       spin_lock_bh(&tn->nametbl_lock);
+       seq = nametbl_find_seq(s->net, type);
        if (!seq)
-               seq = tipc_nameseq_create(type,
-                                         &tipc_nametbl->seq_hlist[index]);
+               seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
        if (seq) {
                spin_lock_bh(&seq->lock);
                tipc_nameseq_subscribe(seq, s);
@@ -732,7 +745,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
                pr_warn("Failed to create subscription for {%u,%u,%u}\n",
                        s->seq.type, s->seq.lower, s->seq.upper);
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 }
 
 /**
@@ -740,10 +753,11 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
  */
 void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
 {
+       struct tipc_net *tn = net_generic(s->net, tipc_net_id);
        struct name_seq *seq;
 
-       spin_lock_bh(&tipc_nametbl_lock);
-       seq = nametbl_find_seq(s->seq.type);
+       spin_lock_bh(&tn->nametbl_lock);
+       seq = nametbl_find_seq(s->net, s->seq.type);
        if (seq != NULL) {
                spin_lock_bh(&seq->lock);
                list_del_init(&s->nameseq_list);
@@ -756,7 +770,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
                        spin_unlock_bh(&seq->lock);
                }
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 }
 
 /**
@@ -858,9 +872,10 @@ static int nametbl_header(char *buf, int len, u32 depth)
 /**
  * nametbl_list - print specified name table contents into the given buffer
  */
-static int nametbl_list(char *buf, int len, u32 depth_info,
+static int nametbl_list(struct net *net, char *buf, int len, u32 depth_info,
                        u32 type, u32 lowbound, u32 upbound)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct hlist_head *seq_head;
        struct name_seq *seq;
        int all_types;
@@ -880,7 +895,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                lowbound = 0;
                upbound = ~0;
                for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
-                       seq_head = &tipc_nametbl->seq_hlist[i];
+                       seq_head = &tn->nametbl->seq_hlist[i];
                        hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
                                ret += nameseq_list(seq, buf + ret, len - ret,
                                                   depth, seq->type,
@@ -896,7 +911,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                }
                ret += nametbl_header(buf + ret, len - ret, depth);
                i = hash(type);
-               seq_head = &tipc_nametbl->seq_hlist[i];
+               seq_head = &tn->nametbl->seq_hlist[i];
                hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
                        if (seq->type == type) {
                                ret += nameseq_list(seq, buf + ret, len - ret,
@@ -909,7 +924,8 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
        return ret;
 }
 
-struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_nametbl_get(struct net *net, const void *req_tlv_area,
+                                int req_tlv_space)
 {
        struct sk_buff *buf;
        struct tipc_name_table_query *argv;
@@ -930,7 +946,7 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
        pb_len = ULTRA_STRING_MAX_LEN;
        argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
        rcu_read_lock();
-       str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
+       str_len = nametbl_list(net, pb, pb_len, ntohl(argv->depth),
                               ntohl(argv->type),
                               ntohl(argv->lowbound), ntohl(argv->upbound));
        rcu_read_unlock();
@@ -941,8 +957,10 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
        return buf;
 }
 
-int tipc_nametbl_init(void)
+int tipc_nametbl_init(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct name_table *tipc_nametbl;
        int i;
 
        tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
@@ -955,6 +973,8 @@ int tipc_nametbl_init(void)
        INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
        INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
        INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
+       tn->nametbl = tipc_nametbl;
+       spin_lock_init(&tn->nametbl_lock);
        return 0;
 }
 
@@ -963,7 +983,7 @@ int tipc_nametbl_init(void)
  *
  * tipc_nametbl_lock must be held when calling this function
  */
-static void tipc_purge_publications(struct name_seq *seq)
+static void tipc_purge_publications(struct net *net, struct name_seq *seq)
 {
        struct publication *publ, *safe;
        struct sub_seq *sseq;
@@ -973,8 +993,8 @@ static void tipc_purge_publications(struct name_seq *seq)
        sseq = seq->sseqs;
        info = sseq->info;
        list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
-               tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
-                                        publ->ref, publ->key);
+               tipc_nametbl_remove_publ(net, publ->type, publ->lower,
+                                        publ->node, publ->ref, publ->key);
                kfree_rcu(publ, rcu);
        }
        hlist_del_init_rcu(&seq->ns_list);
@@ -984,25 +1004,27 @@ static void tipc_purge_publications(struct name_seq *seq)
        kfree_rcu(seq, rcu);
 }
 
-void tipc_nametbl_stop(void)
+void tipc_nametbl_stop(struct net *net)
 {
        u32 i;
        struct name_seq *seq;
        struct hlist_head *seq_head;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct name_table *tipc_nametbl = tn->nametbl;
 
        /* Verify name table is empty and purge any lingering
         * publications, then release the name table
         */
-       spin_lock_bh(&tipc_nametbl_lock);
+       spin_lock_bh(&tn->nametbl_lock);
        for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
                        continue;
                seq_head = &tipc_nametbl->seq_hlist[i];
                hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
-                       tipc_purge_publications(seq);
+                       tipc_purge_publications(net, seq);
                }
        }
-       spin_unlock_bh(&tipc_nametbl_lock);
+       spin_unlock_bh(&tn->nametbl_lock);
 
        synchronize_net();
        kfree(tipc_nametbl);
@@ -1106,9 +1128,10 @@ static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
        return 0;
 }
 
-static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type,
-                             u32 *last_lower, u32 *last_publ)
+static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
+                           u32 *last_type, u32 *last_lower, u32 *last_publ)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct hlist_head *seq_head;
        struct name_seq *seq = NULL;
        int err;
@@ -1120,10 +1143,10 @@ static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type,
                i = 0;
 
        for (; i < TIPC_NAMETBL_SIZE; i++) {
-               seq_head = &tipc_nametbl->seq_hlist[i];
+               seq_head = &tn->nametbl->seq_hlist[i];
 
                if (*last_type) {
-                       seq = nametbl_find_seq(*last_type);
+                       seq = nametbl_find_seq(net, *last_type);
                        if (!seq)
                                return -EPIPE;
                } else {
@@ -1157,6 +1180,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
        u32 last_type = cb->args[0];
        u32 last_lower = cb->args[1];
        u32 last_publ = cb->args[2];
+       struct net *net = sock_net(skb->sk);
        struct tipc_nl_msg msg;
 
        if (done)
@@ -1167,7 +1191,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
        msg.seq = cb->nlh->nlmsg_seq;
 
        rcu_read_lock();
-       err = __tipc_nl_seq_list(&msg, &last_type, &last_lower, &last_publ);
+       err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
        if (!err) {
                done = 1;
        } else if (err != -EMSGSIZE) {
@@ -1188,3 +1212,41 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        return skb->len;
 }
+
+void tipc_plist_push(struct tipc_plist *pl, u32 port)
+{
+       struct tipc_plist *nl;
+
+       if (likely(!pl->port)) {
+               pl->port = port;
+               return;
+       }
+       if (pl->port == port)
+               return;
+       list_for_each_entry(nl, &pl->list, list) {
+               if (nl->port == port)
+                       return;
+       }
+       nl = kmalloc(sizeof(*nl), GFP_ATOMIC);
+       if (nl) {
+               nl->port = port;
+               list_add(&nl->list, &pl->list);
+       }
+}
+
+u32 tipc_plist_pop(struct tipc_plist *pl)
+{
+       struct tipc_plist *nl;
+       u32 port = 0;
+
+       if (likely(list_empty(&pl->list))) {
+               port = pl->port;
+               pl->port = 0;
+               return port;
+       }
+       nl = list_first_entry(&pl->list, typeof(*nl), list);
+       port = nl->port;
+       list_del(&nl->list);
+       kfree(nl);
+       return port;
+}
index 5f0dee92010d24e3d1d8757390c255e119173e82..0304ddc6b1015740e3040b54b7f74484e701419a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/name_table.h: Include file for TIPC name table code
  *
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
  * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -38,7 +38,7 @@
 #define _TIPC_NAME_TABLE_H
 
 struct tipc_subscription;
-struct tipc_port_list;
+struct tipc_plist;
 
 /*
  * TIPC name types reserved for internal TIPC use (both current and planned)
@@ -95,26 +95,41 @@ struct name_table {
        u32 local_publ_count;
 };
 
-extern spinlock_t tipc_nametbl_lock;
-extern struct name_table *tipc_nametbl;
-
 int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
-struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
-u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
-int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-                             struct tipc_port_list *dports);
-struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
-                                        u32 scope, u32 port_ref, u32 key);
-int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
-struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
-                                            u32 scope, u32 node, u32 ref,
+struct sk_buff *tipc_nametbl_get(struct net *net, const void *req_tlv_area,
+                                int req_tlv_space);
+u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
+int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
+                             u32 limit, struct tipc_plist *dports);
+struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
+                                        u32 upper, u32 scope, u32 port_ref,
+                                        u32 key);
+int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
+                         u32 key);
+struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
+                                            u32 lower, u32 upper, u32 scope,
+                                            u32 node, u32 ref, u32 key);
+struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
+                                            u32 lower, u32 node, u32 ref,
                                             u32 key);
-struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node,
-                                            u32 ref, u32 key);
 void tipc_nametbl_subscribe(struct tipc_subscription *s);
 void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
-int tipc_nametbl_init(void);
-void tipc_nametbl_stop(void);
+int tipc_nametbl_init(struct net *net);
+void tipc_nametbl_stop(struct net *net);
+
+struct tipc_plist {
+       struct list_head list;
+       u32 port;
+};
+
+static inline void tipc_plist_init(struct tipc_plist *pl)
+{
+       INIT_LIST_HEAD(&pl->list);
+       pl->port = 0;
+}
+
+void tipc_plist_push(struct tipc_plist *pl, u32 port);
+u32 tipc_plist_pop(struct tipc_plist *pl);
 
 #endif
index cf13df3cde8f9ec48ddb8ee2f326e2caad9a25de..263267e0e7fe463370f5c6d7e092419df0e922b0 100644 (file)
@@ -41,6 +41,7 @@
 #include "socket.h"
 #include "node.h"
 #include "config.h"
+#include "bcast.h"
 
 static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
        [TIPC_NLA_NET_UNSPEC]   = { .type = NLA_UNSPEC },
@@ -108,44 +109,50 @@ static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
  *     - A local spin_lock protecting the queue of subscriber events.
 */
 
-int tipc_net_start(u32 addr)
+int tipc_net_start(struct net *net, u32 addr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        char addr_string[16];
        int res;
 
-       tipc_own_addr = addr;
-       tipc_named_reinit();
-       tipc_sk_reinit();
-       res = tipc_bclink_init();
+       tn->own_addr = addr;
+       tipc_named_reinit(net);
+       tipc_sk_reinit(net);
+       res = tipc_bclink_init(net);
        if (res)
                return res;
 
-       tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
-                            TIPC_ZONE_SCOPE, 0, tipc_own_addr);
+       tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
+                            TIPC_ZONE_SCOPE, 0, tn->own_addr);
 
        pr_info("Started in network mode\n");
        pr_info("Own node address %s, network identity %u\n",
-               tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+               tipc_addr_string_fill(addr_string, tn->own_addr),
+               tn->net_id);
        return 0;
 }
 
-void tipc_net_stop(void)
+void tipc_net_stop(struct net *net)
 {
-       if (!tipc_own_addr)
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       if (!tn->own_addr)
                return;
 
-       tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
+       tipc_nametbl_withdraw(net, TIPC_CFG_SRV, tn->own_addr, 0,
+                             tn->own_addr);
        rtnl_lock();
-       tipc_bearer_stop();
-       tipc_bclink_stop();
-       tipc_node_stop();
+       tipc_bearer_stop(net);
+       tipc_bclink_stop(net);
+       tipc_node_stop(net);
        rtnl_unlock();
 
        pr_info("Left network mode\n");
 }
 
-static int __tipc_nl_add_net(struct tipc_nl_msg *msg)
+static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        void *hdr;
        struct nlattr *attrs;
 
@@ -158,7 +165,7 @@ static int __tipc_nl_add_net(struct tipc_nl_msg *msg)
        if (!attrs)
                goto msg_full;
 
-       if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tipc_net_id))
+       if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
                goto attr_msg_full;
 
        nla_nest_end(msg->skb, attrs);
@@ -176,6 +183,7 @@ msg_full:
 
 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct net *net = sock_net(skb->sk);
        int err;
        int done = cb->args[0];
        struct tipc_nl_msg msg;
@@ -187,7 +195,7 @@ int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;
 
-       err = __tipc_nl_add_net(&msg);
+       err = __tipc_nl_add_net(net, &msg);
        if (err)
                goto out;
 
@@ -200,8 +208,10 @@ out:
 
 int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
 {
-       int err;
+       struct net *net = genl_info_net(info);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+       int err;
 
        if (!info->attrs[TIPC_NLA_NET])
                return -EINVAL;
@@ -216,21 +226,21 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
                u32 val;
 
                /* Can't change net id once TIPC has joined a network */
-               if (tipc_own_addr)
+               if (tn->own_addr)
                        return -EPERM;
 
                val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
                if (val < 1 || val > 9999)
                        return -EINVAL;
 
-               tipc_net_id = val;
+               tn->net_id = val;
        }
 
        if (attrs[TIPC_NLA_NET_ADDR]) {
                u32 addr;
 
                /* Can't change net addr once TIPC has joined a network */
-               if (tipc_own_addr)
+               if (tn->own_addr)
                        return -EPERM;
 
                addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
@@ -238,7 +248,7 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
 
                rtnl_lock();
-               tipc_net_start(addr);
+               tipc_net_start(net, addr);
                rtnl_unlock();
        }
 
index a81c1b9eb150630706a807679d841afaa837cec3..77a7a118911d1b4f6b21dacee9795d2ca657c319 100644 (file)
@@ -39,9 +39,9 @@
 
 #include <net/genetlink.h>
 
-int tipc_net_start(u32 addr);
+int tipc_net_start(struct net *net, u32 addr);
 
-void tipc_net_stop(void);
+void tipc_net_stop(struct net *net);
 
 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
index b891e3905bc42255e7bfc88de0a98af05238895a..fe0f5134ce155fadbfe3c90f97d5f8533fff4bc2 100644 (file)
@@ -46,6 +46,7 @@
 
 static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
 {
+       struct net *net = genl_info_net(info);
        struct sk_buff *rep_buf;
        struct nlmsghdr *rep_nlh;
        struct nlmsghdr *req_nlh = info->nlhdr;
@@ -53,22 +54,24 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
        int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
        u16 cmd;
 
-       if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
+       if ((req_userhdr->cmd & 0xC000) &&
+           (!netlink_net_capable(skb, CAP_NET_ADMIN)))
                cmd = TIPC_CMD_NOT_NET_ADMIN;
        else
                cmd = req_userhdr->cmd;
 
-       rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
-                       nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
-                       nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
-                       hdr_space);
+       rep_buf = tipc_cfg_do_cmd(net, req_userhdr->dest, cmd,
+                                 nlmsg_data(req_nlh) + GENL_HDRLEN +
+                                 TIPC_GENL_HDRLEN,
+                                 nlmsg_attrlen(req_nlh, GENL_HDRLEN +
+                                 TIPC_GENL_HDRLEN), hdr_space);
 
        if (rep_buf) {
                skb_push(rep_buf, hdr_space);
                rep_nlh = nlmsg_hdr(rep_buf);
                memcpy(rep_nlh, req_nlh, hdr_space);
                rep_nlh->nlmsg_len = rep_buf->len;
-               genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
+               genlmsg_unicast(net, rep_buf, NETLINK_CB(skb).portid);
        }
 
        return 0;
@@ -93,6 +96,7 @@ static struct genl_family tipc_genl_family = {
        .version        = TIPC_GENL_VERSION,
        .hdrsize        = TIPC_GENL_HDRLEN,
        .maxattr        = 0,
+       .netnsok        = true,
 };
 
 /* Legacy ASCII API */
@@ -112,6 +116,7 @@ struct genl_family tipc_genl_v2_family = {
        .version        = TIPC_GENL_V2_VERSION,
        .hdrsize        = 0,
        .maxattr        = TIPC_NLA_MAX,
+       .netnsok        = true,
 };
 
 static const struct genl_ops tipc_genl_v2_ops[] = {
index 1425c6869de0234d00488e533d25c4ecaf0541e1..ae2f2d923a15d107606f5604c80926831238275a 100644 (file)
@@ -45,4 +45,7 @@ struct tipc_nl_msg {
        u32 seq;
 };
 
+int tipc_netlink_start(void);
+void tipc_netlink_stop(void);
+
 #endif
index 8d353ec77a6661820066e4cbee0d74cfe534ea08..52308498f2088ec4c1a017a33d70523a6b87c258 100644 (file)
 #include "name_distr.h"
 #include "socket.h"
 
-#define NODE_HTABLE_SIZE 512
-
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
-static struct hlist_head node_htable[NODE_HTABLE_SIZE];
-LIST_HEAD(tipc_node_list);
-static u32 tipc_num_nodes;
-static u32 tipc_num_links;
-static DEFINE_SPINLOCK(node_list_lock);
-
 struct tipc_sock_conn {
        u32 port;
        u32 peer_port;
@@ -78,15 +70,17 @@ static unsigned int tipc_hashfn(u32 addr)
 /*
  * tipc_node_find - locate specified node object, if it exists
  */
-struct tipc_node *tipc_node_find(u32 addr)
+struct tipc_node *tipc_node_find(struct net *net, u32 addr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node;
 
-       if (unlikely(!in_own_cluster_exact(addr)))
+       if (unlikely(!in_own_cluster_exact(net, addr)))
                return NULL;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+       hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
+                                hash) {
                if (node->addr == addr) {
                        rcu_read_unlock();
                        return node;
@@ -96,72 +90,71 @@ struct tipc_node *tipc_node_find(u32 addr)
        return NULL;
 }
 
-struct tipc_node *tipc_node_create(u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *n_ptr, *temp_node;
 
-       spin_lock_bh(&node_list_lock);
-
+       spin_lock_bh(&tn->node_list_lock);
+       n_ptr = tipc_node_find(net, addr);
+       if (n_ptr)
+               goto exit;
        n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
        if (!n_ptr) {
-               spin_unlock_bh(&node_list_lock);
                pr_warn("Node creation failed, no memory\n");
-               return NULL;
+               goto exit;
        }
-
        n_ptr->addr = addr;
+       n_ptr->net = net;
        spin_lock_init(&n_ptr->lock);
        INIT_HLIST_NODE(&n_ptr->hash);
        INIT_LIST_HEAD(&n_ptr->list);
        INIT_LIST_HEAD(&n_ptr->publ_list);
        INIT_LIST_HEAD(&n_ptr->conn_sks);
-       skb_queue_head_init(&n_ptr->waiting_sks);
        __skb_queue_head_init(&n_ptr->bclink.deferred_queue);
-
-       hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
-
-       list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
+       hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
+       list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                if (n_ptr->addr < temp_node->addr)
                        break;
        }
        list_add_tail_rcu(&n_ptr->list, &temp_node->list);
        n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
        n_ptr->signature = INVALID_NODE_SIG;
-
-       tipc_num_nodes++;
-
-       spin_unlock_bh(&node_list_lock);
+       tn->num_nodes++;
+exit:
+       spin_unlock_bh(&tn->node_list_lock);
        return n_ptr;
 }
 
-static void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr)
 {
        list_del_rcu(&n_ptr->list);
        hlist_del_rcu(&n_ptr->hash);
        kfree_rcu(n_ptr, rcu);
 
-       tipc_num_nodes--;
+       tn->num_nodes--;
 }
 
-void tipc_node_stop(void)
+void tipc_node_stop(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node, *t_node;
 
-       spin_lock_bh(&node_list_lock);
-       list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
-               tipc_node_delete(node);
-       spin_unlock_bh(&node_list_lock);
+       spin_lock_bh(&tn->node_list_lock);
+       list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+               tipc_node_delete(tn, node);
+       spin_unlock_bh(&tn->node_list_lock);
 }
 
-int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
+int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
 {
        struct tipc_node *node;
        struct tipc_sock_conn *conn;
 
-       if (in_own_node(dnode))
+       if (in_own_node(net, dnode))
                return 0;
 
-       node = tipc_node_find(dnode);
+       node = tipc_node_find(net, dnode);
        if (!node) {
                pr_warn("Connecting sock to node 0x%x failed\n", dnode);
                return -EHOSTUNREACH;
@@ -179,15 +172,15 @@ int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
        return 0;
 }
 
-void tipc_node_remove_conn(u32 dnode, u32 port)
+void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
 {
        struct tipc_node *node;
        struct tipc_sock_conn *conn, *safe;
 
-       if (in_own_node(dnode))
+       if (in_own_node(net, dnode))
                return;
 
-       node = tipc_node_find(dnode);
+       node = tipc_node_find(net, dnode);
        if (!node)
                return;
 
@@ -201,23 +194,6 @@ void tipc_node_remove_conn(u32 dnode, u32 port)
        tipc_node_unlock(node);
 }
 
-void tipc_node_abort_sock_conns(struct list_head *conns)
-{
-       struct tipc_sock_conn *conn, *safe;
-       struct sk_buff *buf;
-
-       list_for_each_entry_safe(conn, safe, conns, list) {
-               buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
-                                     SHORT_H_SIZE, 0, tipc_own_addr,
-                                     conn->peer_node, conn->port,
-                                     conn->peer_port, TIPC_ERR_NO_NODE);
-               if (likely(buf))
-                       tipc_sk_rcv(buf);
-               list_del(&conn->list);
-               kfree(conn);
-       }
-}
-
 /**
  * tipc_node_link_up - handle addition of link
  *
@@ -231,8 +207,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
        n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
        n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
 
-       pr_info("Established link <%s> on network plane %c\n",
-               l_ptr->name, l_ptr->net_plane);
+       pr_debug("Established link <%s> on network plane %c\n",
+                l_ptr->name, l_ptr->net_plane);
 
        if (!active[0]) {
                active[0] = active[1] = l_ptr;
@@ -240,7 +216,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
                goto exit;
        }
        if (l_ptr->priority < active[0]->priority) {
-               pr_info("New link <%s> becomes standby\n", l_ptr->name);
+               pr_debug("New link <%s> becomes standby\n", l_ptr->name);
                goto exit;
        }
        tipc_link_dup_queue_xmit(active[0], l_ptr);
@@ -248,9 +224,9 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
                active[0] = l_ptr;
                goto exit;
        }
-       pr_info("Old link <%s> becomes standby\n", active[0]->name);
+       pr_debug("Old link <%s> becomes standby\n", active[0]->name);
        if (active[1] != active[0])
-               pr_info("Old link <%s> becomes standby\n", active[1]->name);
+               pr_debug("Old link <%s> becomes standby\n", active[1]->name);
        active[0] = active[1] = l_ptr;
 exit:
        /* Leave room for changeover header when returning 'mtu' to users: */
@@ -290,6 +266,7 @@ static void node_select_active_links(struct tipc_node *n_ptr)
  */
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
+       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
        struct tipc_link **active;
 
        n_ptr->working_links--;
@@ -297,12 +274,12 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
        n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
 
        if (!tipc_link_is_active(l_ptr)) {
-               pr_info("Lost standby link <%s> on network plane %c\n",
-                       l_ptr->name, l_ptr->net_plane);
+               pr_debug("Lost standby link <%s> on network plane %c\n",
+                        l_ptr->name, l_ptr->net_plane);
                return;
        }
-       pr_info("Lost link <%s> on network plane %c\n",
-               l_ptr->name, l_ptr->net_plane);
+       pr_debug("Lost link <%s> on network plane %c\n",
+                l_ptr->name, l_ptr->net_plane);
 
        active = &n_ptr->active_links[0];
        if (active[0] == l_ptr)
@@ -324,7 +301,7 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
        }
 
        /* Loopback link went down? No fragmentation needed from now on. */
-       if (n_ptr->addr == tipc_own_addr) {
+       if (n_ptr->addr == tn->own_addr) {
                n_ptr->act_mtus[0] = MAX_MSG_SIZE;
                n_ptr->act_mtus[1] = MAX_MSG_SIZE;
        }
@@ -342,24 +319,27 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
 
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
+       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
+
        n_ptr->links[l_ptr->bearer_id] = l_ptr;
-       spin_lock_bh(&node_list_lock);
-       tipc_num_links++;
-       spin_unlock_bh(&node_list_lock);
+       spin_lock_bh(&tn->node_list_lock);
+       tn->num_links++;
+       spin_unlock_bh(&tn->node_list_lock);
        n_ptr->link_cnt++;
 }
 
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
+       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
        int i;
 
        for (i = 0; i < MAX_BEARERS; i++) {
                if (l_ptr != n_ptr->links[i])
                        continue;
                n_ptr->links[i] = NULL;
-               spin_lock_bh(&node_list_lock);
-               tipc_num_links--;
-               spin_unlock_bh(&node_list_lock);
+               spin_lock_bh(&tn->node_list_lock);
+               tn->num_links--;
+               spin_unlock_bh(&tn->node_list_lock);
                n_ptr->link_cnt--;
        }
 }
@@ -368,17 +348,21 @@ static void node_established_contact(struct tipc_node *n_ptr)
 {
        n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
        n_ptr->bclink.oos_state = 0;
-       n_ptr->bclink.acked = tipc_bclink_get_last_sent();
-       tipc_bclink_add_node(n_ptr->addr);
+       n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
+       tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
 }
 
 static void node_lost_contact(struct tipc_node *n_ptr)
 {
        char addr_string[16];
-       u32 i;
+       struct tipc_sock_conn *conn, *safe;
+       struct list_head *conns = &n_ptr->conn_sks;
+       struct sk_buff *skb;
+       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
+       uint i;
 
-       pr_info("Lost contact with %s\n",
-               tipc_addr_string_fill(addr_string, n_ptr->addr));
+       pr_debug("Lost contact with %s\n",
+                tipc_addr_string_fill(addr_string, n_ptr->addr));
 
        /* Flush broadcast link info associated with lost node */
        if (n_ptr->bclink.recv_permitted) {
@@ -389,7 +373,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                        n_ptr->bclink.reasm_buf = NULL;
                }
 
-               tipc_bclink_remove_node(n_ptr->addr);
+               tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
                tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
 
                n_ptr->bclink.recv_permitted = false;
@@ -403,19 +387,39 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                l_ptr->reset_checkpoint = l_ptr->next_in_no;
                l_ptr->exp_msg_count = 0;
                tipc_link_reset_fragments(l_ptr);
+
+               /* Link marked for deletion after failover? => do it now */
+               if (l_ptr->flags & LINK_STOPPED)
+                       tipc_link_delete(l_ptr);
        }
 
        n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
 
-       /* Notify subscribers and prevent re-contact with node until
-        * cleanup is done.
-        */
-       n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
-                              TIPC_NOTIFY_NODE_DOWN;
+       /* Prevent re-contact with node until cleanup is done */
+       n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;
+
+       /* Notify publications from this node */
+       n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
+
+       /* Notify sockets connected to node */
+       list_for_each_entry_safe(conn, safe, conns, list) {
+               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+                                     SHORT_H_SIZE, 0, tn->own_addr,
+                                     conn->peer_node, conn->port,
+                                     conn->peer_port, TIPC_ERR_NO_NODE);
+               if (likely(skb)) {
+                       skb_queue_tail(n_ptr->inputq, skb);
+                       n_ptr->action_flags |= TIPC_MSG_EVT;
+               }
+               list_del(&conn->list);
+               kfree(conn);
+       }
 }
 
-struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_node_get_nodes(struct net *net, const void *req_tlv_area,
+                                   int req_tlv_space)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
@@ -430,20 +434,20 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");
 
-       spin_lock_bh(&node_list_lock);
-       if (!tipc_num_nodes) {
-               spin_unlock_bh(&node_list_lock);
+       spin_lock_bh(&tn->node_list_lock);
+       if (!tn->num_nodes) {
+               spin_unlock_bh(&tn->node_list_lock);
                return tipc_cfg_reply_none();
        }
 
        /* For now, get space for all other nodes */
-       payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
+       payload_size = TLV_SPACE(sizeof(node_info)) * tn->num_nodes;
        if (payload_size > 32768u) {
-               spin_unlock_bh(&node_list_lock);
+               spin_unlock_bh(&tn->node_list_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many nodes)");
        }
-       spin_unlock_bh(&node_list_lock);
+       spin_unlock_bh(&tn->node_list_lock);
 
        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf)
@@ -451,7 +455,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 
        /* Add TLVs for all nodes in scope */
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                if (!tipc_in_scope(domain, n_ptr->addr))
                        continue;
                node_info.addr = htonl(n_ptr->addr);
@@ -463,8 +467,10 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
        return buf;
 }
 
-struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_node_get_links(struct net *net, const void *req_tlv_area,
+                                   int req_tlv_space)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 domain;
        struct sk_buff *buf;
        struct tipc_node *n_ptr;
@@ -479,32 +485,32 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");
 
-       if (!tipc_own_addr)
+       if (!tn->own_addr)
                return tipc_cfg_reply_none();
 
-       spin_lock_bh(&node_list_lock);
+       spin_lock_bh(&tn->node_list_lock);
        /* Get space for all unicast links + broadcast link */
-       payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
+       payload_size = TLV_SPACE((sizeof(link_info)) * (tn->num_links + 1));
        if (payload_size > 32768u) {
-               spin_unlock_bh(&node_list_lock);
+               spin_unlock_bh(&tn->node_list_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many links)");
        }
-       spin_unlock_bh(&node_list_lock);
+       spin_unlock_bh(&tn->node_list_lock);
 
        buf = tipc_cfg_reply_alloc(payload_size);
        if (!buf)
                return NULL;
 
        /* Add TLV for broadcast link */
-       link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
+       link_info.dest = htonl(tipc_cluster_mask(tn->own_addr));
        link_info.up = htonl(1);
        strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
        /* Add TLVs for any other links in scope */
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                u32 i;
 
                if (!tipc_in_scope(domain, n_ptr->addr))
@@ -534,10 +540,11 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
  *
  * Returns 0 on success
  */
-int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
+int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
+                          char *linkname, size_t len)
 {
        struct tipc_link *link;
-       struct tipc_node *node = tipc_node_find(addr);
+       struct tipc_node *node = tipc_node_find(net, addr);
 
        if ((bearer_id >= MAX_BEARERS) || !node)
                return -EINVAL;
@@ -554,58 +561,60 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
 
 void tipc_node_unlock(struct tipc_node *node)
 {
-       LIST_HEAD(nsub_list);
-       LIST_HEAD(conn_sks);
-       struct sk_buff_head waiting_sks;
+       struct net *net = node->net;
        u32 addr = 0;
-       int flags = node->action_flags;
+       u32 flags = node->action_flags;
        u32 link_id = 0;
+       struct list_head *publ_list;
+       struct sk_buff_head *inputq = node->inputq;
+       struct sk_buff_head *namedq;
 
-       if (likely(!flags)) {
+       if (likely(!flags || (flags == TIPC_MSG_EVT))) {
+               node->action_flags = 0;
                spin_unlock_bh(&node->lock);
+               if (flags == TIPC_MSG_EVT)
+                       tipc_sk_rcv(net, inputq);
                return;
        }
 
        addr = node->addr;
        link_id = node->link_id;
-       __skb_queue_head_init(&waiting_sks);
-
-       if (flags & TIPC_WAKEUP_USERS)
-               skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
+       namedq = node->namedq;
+       publ_list = &node->publ_list;
 
-       if (flags & TIPC_NOTIFY_NODE_DOWN) {
-               list_replace_init(&node->publ_list, &nsub_list);
-               list_replace_init(&node->conn_sks, &conn_sks);
-       }
-       node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
-                               TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP |
-                               TIPC_NOTIFY_LINK_DOWN |
-                               TIPC_WAKEUP_BCAST_USERS);
+       node->action_flags &= ~(TIPC_MSG_EVT |
+                               TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+                               TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
+                               TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
+                               TIPC_NAMED_MSG_EVT);
 
        spin_unlock_bh(&node->lock);
 
-       while (!skb_queue_empty(&waiting_sks))
-               tipc_sk_rcv(__skb_dequeue(&waiting_sks));
-
-       if (!list_empty(&conn_sks))
-               tipc_node_abort_sock_conns(&conn_sks);
-
-       if (!list_empty(&nsub_list))
-               tipc_publ_notify(&nsub_list, addr);
+       if (flags & TIPC_NOTIFY_NODE_DOWN)
+               tipc_publ_notify(net, publ_list, addr);
 
        if (flags & TIPC_WAKEUP_BCAST_USERS)
-               tipc_bclink_wakeup_users();
+               tipc_bclink_wakeup_users(net);
 
        if (flags & TIPC_NOTIFY_NODE_UP)
-               tipc_named_node_up(addr);
+               tipc_named_node_up(net, addr);
 
        if (flags & TIPC_NOTIFY_LINK_UP)
-               tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr,
+               tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
                                     TIPC_NODE_SCOPE, link_id, addr);
 
        if (flags & TIPC_NOTIFY_LINK_DOWN)
-               tipc_nametbl_withdraw(TIPC_LINK_STATE, addr,
+               tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
                                      link_id, addr);
+
+       if (flags & TIPC_MSG_EVT)
+               tipc_sk_rcv(net, inputq);
+
+       if (flags & TIPC_NAMED_MSG_EVT)
+               tipc_named_rcv(net, namedq);
+
+       if (flags & TIPC_BCAST_MSG_EVT)
+               tipc_bclink_input(net);
 }
 
 /* Caller should hold node lock for the passed node */
@@ -645,6 +654,8 @@ msg_full:
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int err;
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        int done = cb->args[0];
        int last_addr = cb->args[1];
        struct tipc_node *node;
@@ -659,7 +670,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
 
-       if (last_addr && !tipc_node_find(last_addr)) {
+       if (last_addr && !tipc_node_find(net, last_addr)) {
                rcu_read_unlock();
                /* We never set seq or call nl_dump_check_consistent() this
                 * means that setting prev_seq here will cause the consistence
@@ -671,7 +682,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
                return -EPIPE;
        }
 
-       list_for_each_entry_rcu(node, &tipc_node_list, list) {
+       list_for_each_entry_rcu(node, &tn->node_list, list) {
                if (last_addr) {
                        if (node->addr == last_addr)
                                last_addr = 0;
index cbe0e950f1ccb81a63a3e0a19719cdef6fb2c6c5..20ec13f9bede631d3aee5cff6521f775d7fdc321 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/node.h: Include file for TIPC node management routines
  *
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
  * Copyright (c) 2005, 2010-2014, Wind River Systems
  * All rights reserved.
  *
 #include "bearer.h"
 #include "msg.h"
 
-/*
- * Out-of-range value for node signature
- */
-#define INVALID_NODE_SIG 0x10000
+/* Out-of-range value for node signature */
+#define INVALID_NODE_SIG       0x10000
+
+#define NODE_HTABLE_SIZE       512
 
 /* Flags used to take different actions according to flag type
  * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
  * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
  */
 enum {
+       TIPC_MSG_EVT                    = 1,
        TIPC_WAIT_PEER_LINKS_DOWN       = (1 << 1),
        TIPC_WAIT_OWN_LINKS_DOWN        = (1 << 2),
        TIPC_NOTIFY_NODE_DOWN           = (1 << 3),
        TIPC_NOTIFY_NODE_UP             = (1 << 4),
-       TIPC_WAKEUP_USERS               = (1 << 5),
-       TIPC_WAKEUP_BCAST_USERS         = (1 << 6),
-       TIPC_NOTIFY_LINK_UP             = (1 << 7),
-       TIPC_NOTIFY_LINK_DOWN           = (1 << 8)
+       TIPC_WAKEUP_BCAST_USERS         = (1 << 5),
+       TIPC_NOTIFY_LINK_UP             = (1 << 6),
+       TIPC_NOTIFY_LINK_DOWN           = (1 << 7),
+       TIPC_NAMED_MSG_EVT              = (1 << 8),
+       TIPC_BCAST_MSG_EVT              = (1 << 9)
 };
 
 /**
@@ -73,6 +75,7 @@ enum {
  * @oos_state: state tracker for handling OOS b'cast messages
  * @deferred_queue: deferred queue saved OOS b'cast message received from node
  * @reasm_buf: broadcast reassembly queue head from node
+ * @inputq_map: bitmap indicating which inqueues should be kicked
  * @recv_permitted: true if node is allowed to receive b'cast messages
  */
 struct tipc_node_bclink {
@@ -83,6 +86,7 @@ struct tipc_node_bclink {
        u32 deferred_size;
        struct sk_buff_head deferred_queue;
        struct sk_buff *reasm_buf;
+       int inputq_map;
        bool recv_permitted;
 };
 
@@ -90,7 +94,11 @@ struct tipc_node_bclink {
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
  * @lock: spinlock governing access to structure
+ * @net: the applicable net namespace
  * @hash: links to adjacent nodes in unsorted hash chain
+ * @inputq: pointer to input queue containing messages for msg event
+ * @namedq: pointer to name table input queue with name table messages
+ * @curr_link: the link holding the node lock, if any
  * @active_links: pointers to active links to node
  * @links: pointers to all links to node
  * @action_flags: bit mask of different types of node actions
@@ -106,11 +114,14 @@ struct tipc_node_bclink {
 struct tipc_node {
        u32 addr;
        spinlock_t lock;
+       struct net *net;
        struct hlist_node hash;
+       struct sk_buff_head *inputq;
+       struct sk_buff_head *namedq;
        struct tipc_link *active_links[2];
        u32 act_mtus[2];
        struct tipc_link *links[MAX_BEARERS];
-       unsigned int action_flags;
+       int action_flags;
        struct tipc_node_bclink bclink;
        struct list_head list;
        int link_cnt;
@@ -118,28 +129,28 @@ struct tipc_node {
        u32 signature;
        u32 link_id;
        struct list_head publ_list;
-       struct sk_buff_head waiting_sks;
        struct list_head conn_sks;
        struct rcu_head rcu;
 };
 
-extern struct list_head tipc_node_list;
-
-struct tipc_node *tipc_node_find(u32 addr);
-struct tipc_node *tipc_node_create(u32 addr);
-void tipc_node_stop(void);
+struct tipc_node *tipc_node_find(struct net *net, u32 addr);
+struct tipc_node *tipc_node_create(struct net *net, u32 addr);
+void tipc_node_stop(struct net *net);
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 int tipc_node_active_links(struct tipc_node *n_ptr);
 int tipc_node_is_up(struct tipc_node *n_ptr);
-struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
-struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
-int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
+struct sk_buff *tipc_node_get_links(struct net *net, const void *req_tlv_area,
+                                   int req_tlv_space);
+struct sk_buff *tipc_node_get_nodes(struct net *net, const void *req_tlv_area,
+                                   int req_tlv_space);
+int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
+                          char *linkname, size_t len);
 void tipc_node_unlock(struct tipc_node *node);
-int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port);
-void tipc_node_remove_conn(u32 dnode, u32 port);
+int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
+void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
 
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
@@ -154,12 +165,12 @@ static inline bool tipc_node_blocked(struct tipc_node *node)
                TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
 }
 
-static inline uint tipc_node_get_mtu(u32 addr, u32 selector)
+static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
 {
        struct tipc_node *node;
        u32 mtu;
 
-       node = tipc_node_find(addr);
+       node = tipc_node_find(net, addr);
 
        if (likely(node))
                mtu = node->act_mtus[selector & 1];
index a538a02f869b0745000ab117d80791e6d9262c69..eadd4ed459051ddc776a82f4f2fcc9fe34ab89cd 100644 (file)
@@ -35,6 +35,7 @@
 
 #include "server.h"
 #include "core.h"
+#include "socket.h"
 #include <net/sock.h>
 
 /* Number of messages to send before rescheduling */
@@ -255,7 +256,8 @@ static int tipc_receive_from_sock(struct tipc_conn *con)
                goto out_close;
        }
 
-       s->tipc_conn_recvmsg(con->conid, &addr, con->usr_data, buf, ret);
+       s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
+                            con->usr_data, buf, ret);
 
        kmem_cache_free(s->rcvbuf_cache, buf);
 
@@ -307,7 +309,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
        struct socket *sock = NULL;
        int ret;
 
-       ret = tipc_sock_create_local(s->type, &sock);
+       ret = tipc_sock_create_local(s->net, s->type, &sock);
        if (ret < 0)
                return NULL;
        ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
index be817b0b547e87148a103284adc5285a66147c28..9015faedb1b0e7c9f4ea18b293aa3dc8b16f4bf3 100644 (file)
@@ -36,7 +36,9 @@
 #ifndef _TIPC_SERVER_H
 #define _TIPC_SERVER_H
 
-#include "core.h"
+#include <linux/idr.h>
+#include <linux/tipc.h>
+#include <net/net_namespace.h>
 
 #define TIPC_SERVER_NAME_LEN   32
 
@@ -45,6 +47,7 @@
  * @conn_idr: identifier set of connection
  * @idr_lock: protect the connection identifier set
  * @idr_in_use: amount of allocated identifier entry
+ * @net: network namspace instance
  * @rcvbuf_cache: memory cache of server receive buffer
  * @rcv_wq: receive workqueue
  * @send_wq: send workqueue
@@ -61,16 +64,18 @@ struct tipc_server {
        struct idr conn_idr;
        spinlock_t idr_lock;
        int idr_in_use;
+       struct net *net;
        struct kmem_cache *rcvbuf_cache;
        struct workqueue_struct *rcv_wq;
        struct workqueue_struct *send_wq;
        int max_rcvbuf_size;
-       void *(*tipc_conn_new) (int conid);
-       void (*tipc_conn_shutdown) (int conid, void *usr_data);
-       void (*tipc_conn_recvmsg) (int conid, struct sockaddr_tipc *addr,
-                                  void *usr_data, void *buf, size_t len);
+       void *(*tipc_conn_new)(int conid);
+       void (*tipc_conn_shutdown)(int conid, void *usr_data);
+       void (*tipc_conn_recvmsg)(struct net *net, int conid,
+                                 struct sockaddr_tipc *addr, void *usr_data,
+                                 void *buf, size_t len);
        struct sockaddr_tipc *saddr;
-       const char name[TIPC_SERVER_NAME_LEN];
+       char name[TIPC_SERVER_NAME_LEN];
        int imp;
        int type;
 };
index 4731cad99d1cc8896cc0d6734f6eb48af3135207..4a98d15a13239ff514e14d67e4ae39745f410c15 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/socket.c: TIPC socket API
  *
- * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
+ * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
  * All rights reserved.
  *
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/rhashtable.h>
+#include <linux/jhash.h>
 #include "core.h"
 #include "name_table.h"
 #include "node.h"
 #include "link.h"
-#include <linux/export.h>
 #include "config.h"
+#include "name_distr.h"
 #include "socket.h"
 
-#define SS_LISTENING   -1      /* socket is listening */
-#define SS_READY       -2      /* socket is connectionless */
+#define SS_LISTENING           -1      /* socket is listening */
+#define SS_READY               -2      /* socket is connectionless */
 
-#define CONN_TIMEOUT_DEFAULT  8000     /* default connect timeout = 8s */
-#define CONN_PROBING_INTERVAL 3600000  /* [ms] => 1 h */
-#define TIPC_FWD_MSG         1
-#define TIPC_CONN_OK          0
-#define TIPC_CONN_PROBING     1
+#define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
+#define CONN_PROBING_INTERVAL  msecs_to_jiffies(3600000)  /* [ms] => 1 h */
+#define TIPC_FWD_MSG           1
+#define TIPC_CONN_OK           0
+#define TIPC_CONN_PROBING      1
+#define TIPC_MAX_PORT          0xffffffff
+#define TIPC_MIN_PORT          1
 
 /**
  * struct tipc_sock - TIPC socket structure
  * @conn_instance: TIPC instance used when connection was established
  * @published: non-zero if port has one or more associated names
  * @max_pkt: maximum packet size "hint" used when building messages sent by port
- * @ref: unique reference to port in TIPC object registry
+ * @portid: unique port identity in TIPC socket hash table
  * @phdr: preformatted message header used when sending messages
  * @port_list: adjacent ports in TIPC's global list of ports
  * @publications: list of publications for port
  * @pub_count: total # of publications port has made during its lifetime
  * @probing_state:
- * @probing_interval:
- * @timer:
- * @port: port - interacts with 'sk' and with the rest of the TIPC stack
- * @peer_name: the peer of the connection, if any
+ * @probing_intv:
  * @conn_timeout: the time we can wait for an unresponded setup request
  * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
  * @link_cong: non-zero if owner must sleep because of link congestion
  * @sent_unacked: # messages sent by socket, and not yet acked by peer
  * @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @node: hash table node
+ * @rcu: rcu struct for tipc_sock
  */
 struct tipc_sock {
        struct sock sk;
@@ -82,19 +85,20 @@ struct tipc_sock {
        u32 conn_instance;
        int published;
        u32 max_pkt;
-       u32 ref;
+       u32 portid;
        struct tipc_msg phdr;
        struct list_head sock_list;
        struct list_head publications;
        u32 pub_count;
        u32 probing_state;
-       u32 probing_interval;
-       struct timer_list timer;
+       unsigned long probing_intv;
        uint conn_timeout;
        atomic_t dupl_rcvcnt;
        bool link_cong;
        uint sent_unacked;
        uint rcv_unacked;
+       struct rhash_head node;
+       struct rcu_head rcu;
 };
 
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
@@ -103,16 +107,14 @@ static void tipc_write_space(struct sock *sk);
 static int tipc_release(struct socket *sock);
 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
-static void tipc_sk_timeout(unsigned long ref);
+static void tipc_sk_timeout(unsigned long data);
 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                           struct tipc_name_seq const *seq);
 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                            struct tipc_name_seq const *seq);
-static u32 tipc_sk_ref_acquire(struct tipc_sock *tsk);
-static void tipc_sk_ref_discard(u32 ref);
-static struct tipc_sock *tipc_sk_get(u32 ref);
-static struct tipc_sock *tipc_sk_get_next(u32 *ref);
-static void tipc_sk_put(struct tipc_sock *tsk);
+static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
+static int tipc_sk_insert(struct tipc_sock *tsk);
+static void tipc_sk_remove(struct tipc_sock *tsk);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -174,6 +176,11 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
  *   - port reference
  */
 
+static u32 tsk_own_node(struct tipc_sock *tsk)
+{
+       return msg_prevnode(&tsk->phdr);
+}
+
 static u32 tsk_peer_node(struct tipc_sock *tsk)
 {
        return msg_destnode(&tsk->phdr);
@@ -246,10 +253,11 @@ static void tsk_rej_rx_queue(struct sock *sk)
 {
        struct sk_buff *skb;
        u32 dnode;
+       u32 own_node = tsk_own_node(tipc_sk(sk));
 
        while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
-               if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
-                       tipc_link_xmit_skb(skb, dnode, 0);
+               if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
+                       tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
        }
 }
 
@@ -260,6 +268,7 @@ static void tsk_rej_rx_queue(struct sock *sk)
  */
 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 {
+       struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
        u32 peer_port = tsk_peer_port(tsk);
        u32 orig_node;
        u32 peer_node;
@@ -276,10 +285,10 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
        if (likely(orig_node == peer_node))
                return true;
 
-       if (!orig_node && (peer_node == tipc_own_addr))
+       if (!orig_node && (peer_node == tn->own_addr))
                return true;
 
-       if (!peer_node && (orig_node == tipc_own_addr))
+       if (!peer_node && (orig_node == tn->own_addr))
                return true;
 
        return false;
@@ -300,12 +309,12 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 static int tipc_sk_create(struct net *net, struct socket *sock,
                          int protocol, int kern)
 {
+       struct tipc_net *tn;
        const struct proto_ops *ops;
        socket_state state;
        struct sock *sk;
        struct tipc_sock *tsk;
        struct tipc_msg *msg;
-       u32 ref;
 
        /* Validate arguments */
        if (unlikely(protocol != 0))
@@ -339,24 +348,23 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
                return -ENOMEM;
 
        tsk = tipc_sk(sk);
-       ref = tipc_sk_ref_acquire(tsk);
-       if (!ref) {
-               pr_warn("Socket create failed; reference table exhausted\n");
-               return -ENOMEM;
-       }
        tsk->max_pkt = MAX_PKT_DEFAULT;
-       tsk->ref = ref;
        INIT_LIST_HEAD(&tsk->publications);
        msg = &tsk->phdr;
-       tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+       tn = net_generic(sock_net(sk), tipc_net_id);
+       tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
                      NAMED_H_SIZE, 0);
-       msg_set_origport(msg, ref);
 
        /* Finish initializing socket data structures */
        sock->ops = ops;
        sock->state = state;
        sock_init_data(sock, sk);
-       k_init_timer(&tsk->timer, (Handler)tipc_sk_timeout, ref);
+       if (tipc_sk_insert(tsk)) {
+               pr_warn("Socket create failed; port numbrer exhausted\n");
+               return -EINVAL;
+       }
+       msg_set_origport(msg, tsk->portid);
+       setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
        sk->sk_backlog_rcv = tipc_backlog_rcv;
        sk->sk_rcvbuf = sysctl_tipc_rmem[1];
        sk->sk_data_ready = tipc_data_ready;
@@ -384,7 +392,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
  *
  * Returns 0 on success, errno otherwise
  */
-int tipc_sock_create_local(int type, struct socket **res)
+int tipc_sock_create_local(struct net *net, int type, struct socket **res)
 {
        int rc;
 
@@ -393,7 +401,7 @@ int tipc_sock_create_local(int type, struct socket **res)
                pr_err("Failed to create kernel socket\n");
                return rc;
        }
-       tipc_sk_create(&init_net, *res, 0, 1);
+       tipc_sk_create(net, *res, 0, 1);
 
        return 0;
 }
@@ -442,6 +450,13 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
        return ret;
 }
 
+static void tipc_sk_callback(struct rcu_head *head)
+{
+       struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
+
+       sock_put(&tsk->sk);
+}
+
 /**
  * tipc_release - destroy a TIPC socket
  * @sock: socket to destroy
@@ -461,9 +476,10 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
 static int tipc_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
+       struct net *net;
        struct tipc_sock *tsk;
        struct sk_buff *skb;
-       u32 dnode;
+       u32 dnode, probing_state;
 
        /*
         * Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -472,6 +488,7 @@ static int tipc_release(struct socket *sock)
        if (sk == NULL)
                return 0;
 
+       net = sock_net(sk);
        tsk = tipc_sk(sk);
        lock_sock(sk);
 
@@ -491,26 +508,29 @@ static int tipc_release(struct socket *sock)
                            (sock->state == SS_CONNECTED)) {
                                sock->state = SS_DISCONNECTING;
                                tsk->connected = 0;
-                               tipc_node_remove_conn(dnode, tsk->ref);
+                               tipc_node_remove_conn(net, dnode, tsk->portid);
                        }
-                       if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
-                               tipc_link_xmit_skb(skb, dnode, 0);
+                       if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
+                                            TIPC_ERR_NO_PORT))
+                               tipc_link_xmit_skb(net, skb, dnode, 0);
                }
        }
 
        tipc_sk_withdraw(tsk, 0, NULL);
-       tipc_sk_ref_discard(tsk->ref);
-       k_cancel_timer(&tsk->timer);
+       probing_state = tsk->probing_state;
+       if (del_timer_sync(&sk->sk_timer) &&
+           probing_state != TIPC_CONN_PROBING)
+               sock_put(sk);
+       tipc_sk_remove(tsk);
        if (tsk->connected) {
-               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
-                                     SHORT_H_SIZE, 0, dnode, tipc_own_addr,
-                                     tsk_peer_port(tsk),
-                                     tsk->ref, TIPC_ERR_NO_PORT);
+               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+                                     TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
+                                     tsk_own_node(tsk), tsk_peer_port(tsk),
+                                     tsk->portid, TIPC_ERR_NO_PORT);
                if (skb)
-                       tipc_link_xmit_skb(skb, dnode, tsk->ref);
-               tipc_node_remove_conn(dnode, tsk->ref);
+                       tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+               tipc_node_remove_conn(net, dnode, tsk->portid);
        }
-       k_term_timer(&tsk->timer);
 
        /* Discard any remaining (connection-based) messages in receive queue */
        __skb_queue_purge(&sk->sk_receive_queue);
@@ -518,7 +538,8 @@ static int tipc_release(struct socket *sock)
        /* Reject any messages that accumulated in backlog queue */
        sock->state = SS_DISCONNECTING;
        release_sock(sk);
-       sock_put(sk);
+
+       call_rcu(&tsk->rcu, tipc_sk_callback);
        sock->sk = NULL;
 
        return 0;
@@ -602,6 +623,7 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 {
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
        struct tipc_sock *tsk = tipc_sk(sock->sk);
+       struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
 
        memset(addr, 0, sizeof(*addr));
        if (peer) {
@@ -611,8 +633,8 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
                addr->addr.id.ref = tsk_peer_port(tsk);
                addr->addr.id.node = tsk_peer_node(tsk);
        } else {
-               addr->addr.id.ref = tsk->ref;
-               addr->addr.id.node = tipc_own_addr;
+               addr->addr.id.ref = tsk->portid;
+               addr->addr.id.node = tn->own_addr;
        }
 
        *uaddr_len = sizeof(*addr);
@@ -711,8 +733,11 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
                          struct msghdr *msg, size_t dsz, long timeo)
 {
        struct sock *sk = sock->sk;
-       struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
-       struct sk_buff_head head;
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct net *net = sock_net(sk);
+       struct tipc_msg *mhdr = &tsk->phdr;
+       struct sk_buff_head *pktchain = &sk->sk_write_queue;
+       struct iov_iter save = msg->msg_iter;
        uint mtu;
        int rc;
 
@@ -727,83 +752,97 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
 
 new_mtu:
        mtu = tipc_bclink_get_mtu();
-       __skb_queue_head_init(&head);
-       rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
+       rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
        if (unlikely(rc < 0))
                return rc;
 
        do {
-               rc = tipc_bclink_xmit(&head);
+               rc = tipc_bclink_xmit(net, pktchain);
                if (likely(rc >= 0)) {
                        rc = dsz;
                        break;
                }
-               if (rc == -EMSGSIZE)
+               if (rc == -EMSGSIZE) {
+                       msg->msg_iter = save;
                        goto new_mtu;
+               }
                if (rc != -ELINKCONG)
                        break;
                tipc_sk(sk)->link_cong = 1;
                rc = tipc_wait_for_sndmsg(sock, &timeo);
                if (rc)
-                       __skb_queue_purge(&head);
+                       __skb_queue_purge(pktchain);
        } while (!rc);
        return rc;
 }
 
-/* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets
+/**
+ * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
+ * @arrvq: queue with arriving messages, to be cloned after destination lookup
+ * @inputq: queue with cloned messages, delivered to socket after dest lookup
+ *
+ * Multi-threaded: parallel calls with reference to same queues may occur
  */
-void tipc_sk_mcast_rcv(struct sk_buff *buf)
+void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
+                      struct sk_buff_head *inputq)
 {
-       struct tipc_msg *msg = buf_msg(buf);
-       struct tipc_port_list dports = {0, NULL, };
-       struct tipc_port_list *item;
-       struct sk_buff *b;
-       uint i, last, dst = 0;
+       struct tipc_msg *msg;
+       struct tipc_plist dports;
+       u32 portid;
        u32 scope = TIPC_CLUSTER_SCOPE;
-
-       if (in_own_node(msg_orignode(msg)))
-               scope = TIPC_NODE_SCOPE;
-
-       /* Create destination port list: */
-       tipc_nametbl_mc_translate(msg_nametype(msg),
-                                 msg_namelower(msg),
-                                 msg_nameupper(msg),
-                                 scope,
-                                 &dports);
-       last = dports.count;
-       if (!last) {
-               kfree_skb(buf);
-               return;
-       }
-
-       for (item = &dports; item; item = item->next) {
-               for (i = 0; i < PLSIZE && ++dst <= last; i++) {
-                       b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf;
-                       if (!b) {
-                               pr_warn("Failed do clone mcast rcv buffer\n");
+       struct sk_buff_head tmpq;
+       uint hsz;
+       struct sk_buff *skb, *_skb;
+
+       __skb_queue_head_init(&tmpq);
+       tipc_plist_init(&dports);
+
+       skb = tipc_skb_peek(arrvq, &inputq->lock);
+       for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
+               msg = buf_msg(skb);
+               hsz = skb_headroom(skb) + msg_hdr_sz(msg);
+
+               if (in_own_node(net, msg_orignode(msg)))
+                       scope = TIPC_NODE_SCOPE;
+
+               /* Create destination port list and message clones: */
+               tipc_nametbl_mc_translate(net,
+                                         msg_nametype(msg), msg_namelower(msg),
+                                         msg_nameupper(msg), scope, &dports);
+               portid = tipc_plist_pop(&dports);
+               for (; portid; portid = tipc_plist_pop(&dports)) {
+                       _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
+                       if (_skb) {
+                               msg_set_destport(buf_msg(_skb), portid);
+                               __skb_queue_tail(&tmpq, _skb);
                                continue;
                        }
-                       msg_set_destport(msg, item->ports[i]);
-                       tipc_sk_rcv(b);
+                       pr_warn("Failed to clone mcast rcv buffer\n");
+               }
+               /* Append to inputq if not already done by other thread */
+               spin_lock_bh(&inputq->lock);
+               if (skb_peek(arrvq) == skb) {
+                       skb_queue_splice_tail_init(&tmpq, inputq);
+                       kfree_skb(__skb_dequeue(arrvq));
                }
+               spin_unlock_bh(&inputq->lock);
+               __skb_queue_purge(&tmpq);
+               kfree_skb(skb);
        }
-       tipc_port_list_free(&dports);
+       tipc_sk_rcv(net, inputq);
 }
 
 /**
  * tipc_sk_proto_rcv - receive a connection mng protocol message
  * @tsk: receiving socket
- * @dnode: node to send response message to, if any
- * @buf: buffer containing protocol message
- * Returns 0 (TIPC_OK) if message was consumed, 1 (TIPC_FWD_MSG) if
- * (CONN_PROBE_REPLY) message should be forwarded.
+ * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
  */
-static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
-                            struct sk_buff *buf)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
 {
-       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_msg *msg = buf_msg(*skb);
        int conn_cong;
-
+       u32 dnode;
+       u32 own_node = tsk_own_node(tsk);
        /* Ignore if connection cannot be validated: */
        if (!tsk_peer_msg(tsk, msg))
                goto exit;
@@ -816,15 +855,15 @@ static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
                if (conn_cong)
                        tsk->sk.sk_write_space(&tsk->sk);
        } else if (msg_type(msg) == CONN_PROBE) {
-               if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
-                       return TIPC_OK;
-               msg_set_type(msg, CONN_PROBE_REPLY);
-               return TIPC_FWD_MSG;
+               if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
+                       msg_set_type(msg, CONN_PROBE_REPLY);
+                       return;
+               }
        }
        /* Do nothing if msg_type() == CONN_PROBE_REPLY */
 exit:
-       kfree_skb(buf);
-       return TIPC_OK;
+       kfree_skb(*skb);
+       *skb = NULL;
 }
 
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -872,11 +911,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
+       struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
        u32 dnode, dport;
-       struct sk_buff_head head;
+       struct sk_buff_head *pktchain = &sk->sk_write_queue;
        struct sk_buff *skb;
        struct tipc_name_seq *seq = &dest->addr.nameseq;
+       struct iov_iter save;
        u32 mtu;
        long timeo;
        int rc;
@@ -929,7 +970,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
                msg_set_nametype(mhdr, type);
                msg_set_nameinst(mhdr, inst);
                msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
-               dport = tipc_nametbl_translate(type, inst, &dnode);
+               dport = tipc_nametbl_translate(net, type, inst, &dnode);
                msg_set_destnode(mhdr, dnode);
                msg_set_destport(mhdr, dport);
                if (unlikely(!dport && !dnode)) {
@@ -945,31 +986,33 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
                msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
        }
 
+       save = m->msg_iter;
 new_mtu:
-       mtu = tipc_node_get_mtu(dnode, tsk->ref);
-       __skb_queue_head_init(&head);
-       rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
+       mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+       rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
        if (rc < 0)
                goto exit;
 
        do {
-               skb = skb_peek(&head);
+               skb = skb_peek(pktchain);
                TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-               rc = tipc_link_xmit(&head, dnode, tsk->ref);
+               rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
                if (likely(rc >= 0)) {
                        if (sock->state != SS_READY)
                                sock->state = SS_CONNECTING;
                        rc = dsz;
                        break;
                }
-               if (rc == -EMSGSIZE)
+               if (rc == -EMSGSIZE) {
+                       m->msg_iter = save;
                        goto new_mtu;
+               }
                if (rc != -ELINKCONG)
                        break;
                tsk->link_cong = 1;
                rc = tipc_wait_for_sndmsg(sock, &timeo);
                if (rc)
-                       __skb_queue_purge(&head);
+                       __skb_queue_purge(pktchain);
        } while (!rc);
 exit:
        if (iocb)
@@ -1024,15 +1067,17 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *m, size_t dsz)
 {
        struct sock *sk = sock->sk;
+       struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
-       struct sk_buff_head head;
+       struct sk_buff_head *pktchain = &sk->sk_write_queue;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
-       u32 ref = tsk->ref;
+       u32 portid = tsk->portid;
        int rc = -EINVAL;
        long timeo;
        u32 dnode;
        uint mtu, send, sent = 0;
+       struct iov_iter save;
 
        /* Handle implied connection establishment */
        if (unlikely(dest)) {
@@ -1059,15 +1104,15 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
        dnode = tsk_peer_node(tsk);
 
 next:
+       save = m->msg_iter;
        mtu = tsk->max_pkt;
        send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
-       __skb_queue_head_init(&head);
-       rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
+       rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
        if (unlikely(rc < 0))
                goto exit;
        do {
                if (likely(!tsk_conn_cong(tsk))) {
-                       rc = tipc_link_xmit(&head, dnode, ref);
+                       rc = tipc_link_xmit(net, pktchain, dnode, portid);
                        if (likely(!rc)) {
                                tsk->sent_unacked++;
                                sent += send;
@@ -1076,7 +1121,9 @@ next:
                                goto next;
                        }
                        if (rc == -EMSGSIZE) {
-                               tsk->max_pkt = tipc_node_get_mtu(dnode, ref);
+                               tsk->max_pkt = tipc_node_get_mtu(net, dnode,
+                                                                portid);
+                               m->msg_iter = save;
                                goto next;
                        }
                        if (rc != -ELINKCONG)
@@ -1085,7 +1132,7 @@ next:
                }
                rc = tipc_wait_for_sndpkt(sock, &timeo);
                if (rc)
-                       __skb_queue_purge(&head);
+                       __skb_queue_purge(pktchain);
        } while (!rc);
 exit:
        if (iocb)
@@ -1118,6 +1165,8 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
                                u32 peer_node)
 {
+       struct sock *sk = &tsk->sk;
+       struct net *net = sock_net(sk);
        struct tipc_msg *msg = &tsk->phdr;
 
        msg_set_destnode(msg, peer_node);
@@ -1126,12 +1175,12 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
        msg_set_lookup_scope(msg, 0);
        msg_set_hdr_sz(msg, SHORT_H_SIZE);
 
-       tsk->probing_interval = CONN_PROBING_INTERVAL;
+       tsk->probing_intv = CONN_PROBING_INTERVAL;
        tsk->probing_state = TIPC_CONN_OK;
        tsk->connected = 1;
-       k_start_timer(&tsk->timer, tsk->probing_interval);
-       tipc_node_add_conn(peer_node, tsk->ref, peer_port);
-       tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->ref);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
+       tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
+       tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
 }
 
 /**
@@ -1230,6 +1279,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
 
 static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
 {
+       struct net *net = sock_net(&tsk->sk);
        struct sk_buff *skb = NULL;
        struct tipc_msg *msg;
        u32 peer_port = tsk_peer_port(tsk);
@@ -1237,13 +1287,14 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
 
        if (!tsk->connected)
                return;
-       skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
-                             tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
+       skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
+                             dnode, tsk_own_node(tsk), peer_port,
+                             tsk->portid, TIPC_OK);
        if (!skb)
                return;
        msg = buf_msg(skb);
        msg_set_msgcnt(msg, ack);
-       tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
+       tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
 }
 
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1529,15 +1580,16 @@ static void tipc_data_ready(struct sock *sk)
 /**
  * filter_connect - Handle all incoming messages for a connection-based socket
  * @tsk: TIPC socket
- * @msg: message
+ * @skb: pointer to message buffer. Set to NULL if buffer is consumed
  *
  * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
  */
-static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
+static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
 {
        struct sock *sk = &tsk->sk;
+       struct net *net = sock_net(sk);
        struct socket *sock = sk->sk_socket;
-       struct tipc_msg *msg = buf_msg(*buf);
+       struct tipc_msg *msg = buf_msg(*skb);
        int retval = -TIPC_ERR_NO_PORT;
 
        if (msg_mcast(msg))
@@ -1551,8 +1603,8 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
                                sock->state = SS_DISCONNECTING;
                                tsk->connected = 0;
                                /* let timer expire on it's own */
-                               tipc_node_remove_conn(tsk_peer_node(tsk),
-                                                     tsk->ref);
+                               tipc_node_remove_conn(net, tsk_peer_node(tsk),
+                                                     tsk->portid);
                        }
                        retval = TIPC_OK;
                }
@@ -1587,8 +1639,8 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
                 * connect() routine if sleeping.
                 */
                if (msg_data_sz(msg) == 0) {
-                       kfree_skb(*buf);
-                       *buf = NULL;
+                       kfree_skb(*skb);
+                       *skb = NULL;
                        if (waitqueue_active(sk_sleep(sk)))
                                wake_up_interruptible(sk_sleep(sk));
                }
@@ -1640,32 +1692,33 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
 /**
  * filter_rcv - validate incoming message
  * @sk: socket
- * @buf: message
+ * @skb: pointer to message. Set to NULL if buffer is consumed.
  *
  * Enqueues message on receive queue if acceptable; optionally handles
  * disconnect indication for a connected socket.
  *
- * Called with socket lock already taken; port lock may also be taken.
+ * Called with socket lock already taken
  *
- * Returns 0 (TIPC_OK) if message was consumed, -TIPC error code if message
- * to be rejected, 1 (TIPC_FWD_MSG) if (CONN_MANAGER) message to be forwarded
+ * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
  */
-static int filter_rcv(struct sock *sk, struct sk_buff *buf)
+static int filter_rcv(struct sock *sk, struct sk_buff **skb)
 {
        struct socket *sock = sk->sk_socket;
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_msg *msg = buf_msg(buf);
-       unsigned int limit = rcvbuf_limit(sk, buf);
-       u32 onode;
+       struct tipc_msg *msg = buf_msg(*skb);
+       unsigned int limit = rcvbuf_limit(sk, *skb);
        int rc = TIPC_OK;
 
-       if (unlikely(msg_user(msg) == CONN_MANAGER))
-               return tipc_sk_proto_rcv(tsk, &onode, buf);
+       if (unlikely(msg_user(msg) == CONN_MANAGER)) {
+               tipc_sk_proto_rcv(tsk, skb);
+               return TIPC_OK;
+       }
 
        if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
-               kfree_skb(buf);
+               kfree_skb(*skb);
                tsk->link_cong = 0;
                sk->sk_write_space(sk);
+               *skb = NULL;
                return TIPC_OK;
        }
 
@@ -1677,21 +1730,22 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
                if (msg_connected(msg))
                        return -TIPC_ERR_NO_PORT;
        } else {
-               rc = filter_connect(tsk, &buf);
-               if (rc != TIPC_OK || buf == NULL)
+               rc = filter_connect(tsk, skb);
+               if (rc != TIPC_OK || !*skb)
                        return rc;
        }
 
        /* Reject message if there isn't room to queue it */
-       if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
+       if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
                return -TIPC_ERR_OVERLOAD;
 
        /* Enqueue message */
-       TIPC_SKB_CB(buf)->handle = NULL;
-       __skb_queue_tail(&sk->sk_receive_queue, buf);
-       skb_set_owner_r(buf, sk);
+       TIPC_SKB_CB(*skb)->handle = NULL;
+       __skb_queue_tail(&sk->sk_receive_queue, *skb);
+       skb_set_owner_r(*skb, sk);
 
        sk->sk_data_ready(sk);
+       *skb = NULL;
        return TIPC_OK;
 }
 
@@ -1700,78 +1754,125 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
  * @sk: socket
  * @skb: message
  *
- * Caller must hold socket lock, but not port lock.
+ * Caller must hold socket lock
  *
  * Returns 0
  */
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
-       int rc;
-       u32 onode;
+       int err;
+       atomic_t *dcnt;
+       u32 dnode;
        struct tipc_sock *tsk = tipc_sk(sk);
+       struct net *net = sock_net(sk);
        uint truesize = skb->truesize;
 
-       rc = filter_rcv(sk, skb);
-
-       if (likely(!rc)) {
-               if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
-                       atomic_add(truesize, &tsk->dupl_rcvcnt);
+       err = filter_rcv(sk, &skb);
+       if (likely(!skb)) {
+               dcnt = &tsk->dupl_rcvcnt;
+               if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
+                       atomic_add(truesize, dcnt);
                return 0;
        }
+       if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
+               tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+       return 0;
+}
 
-       if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
-               return 0;
-
-       tipc_link_xmit_skb(skb, onode, 0);
+/**
+ * tipc_sk_enqueue - extract all buffers with destination 'dport' from
+ *                   inputq and try adding them to socket or backlog queue
+ * @inputq: list of incoming buffers with potentially different destinations
+ * @sk: socket where the buffers should be enqueued
+ * @dport: port number for the socket
+ * @_skb: returned buffer to be forwarded or rejected, if applicable
+ *
+ * Caller must hold socket lock
+ *
+ * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
+ * or -TIPC_ERR_NO_PORT
+ */
+static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+                          u32 dport, struct sk_buff **_skb)
+{
+       unsigned int lim;
+       atomic_t *dcnt;
+       int err;
+       struct sk_buff *skb;
+       unsigned long time_limit = jiffies + 2;
 
-       return 0;
+       while (skb_queue_len(inputq)) {
+               if (unlikely(time_after_eq(jiffies, time_limit)))
+                       return TIPC_OK;
+               skb = tipc_skb_dequeue(inputq, dport);
+               if (unlikely(!skb))
+                       return TIPC_OK;
+               if (!sock_owned_by_user(sk)) {
+                       err = filter_rcv(sk, &skb);
+                       if (likely(!skb))
+                               continue;
+                       *_skb = skb;
+                       return err;
+               }
+               dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+               if (sk->sk_backlog.len)
+                       atomic_set(dcnt, 0);
+               lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
+               if (likely(!sk_add_backlog(sk, skb, lim)))
+                       continue;
+               *_skb = skb;
+               return -TIPC_ERR_OVERLOAD;
+       }
+       return TIPC_OK;
 }
 
 /**
- * tipc_sk_rcv - handle incoming message
- * @skb: buffer containing arriving message
- * Consumes buffer
- * Returns 0 if success, or errno: -EHOSTUNREACH
+ * tipc_sk_rcv - handle a chain of incoming buffers
+ * @inputq: buffer list containing the buffers
+ * Consumes all buffers in list until inputq is empty
+ * Note: may be called in multiple threads referring to the same queue
+ * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
+ * Only node local calls check the return value, sending single-buffer queues
  */
-int tipc_sk_rcv(struct sk_buff *skb)
+int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
+       u32 dnode, dport = 0;
+       int err = -TIPC_ERR_NO_PORT;
+       struct sk_buff *skb;
        struct tipc_sock *tsk;
+       struct tipc_net *tn;
        struct sock *sk;
-       u32 dport = msg_destport(buf_msg(skb));
-       int rc = TIPC_OK;
-       uint limit;
-       u32 dnode;
 
-       /* Validate destination and message */
-       tsk = tipc_sk_get(dport);
-       if (unlikely(!tsk)) {
-               rc = tipc_msg_eval(skb, &dnode);
-               goto exit;
+       while (skb_queue_len(inputq)) {
+               skb = NULL;
+               dport = tipc_skb_peek_port(inputq, dport);
+               tsk = tipc_sk_lookup(net, dport);
+               if (likely(tsk)) {
+                       sk = &tsk->sk;
+                       if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
+                               err = tipc_sk_enqueue(inputq, sk, dport, &skb);
+                               spin_unlock_bh(&sk->sk_lock.slock);
+                               dport = 0;
+                       }
+                       sock_put(sk);
+               } else {
+                       skb = tipc_skb_dequeue(inputq, dport);
+               }
+               if (likely(!skb))
+                       continue;
+               if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
+                       goto xmit;
+               if (!err) {
+                       dnode = msg_destnode(buf_msg(skb));
+                       goto xmit;
+               }
+               tn = net_generic(net, tipc_net_id);
+               if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
+                       continue;
+xmit:
+               tipc_link_xmit_skb(net, skb, dnode, dport);
        }
-       sk = &tsk->sk;
-
-       /* Queue message */
-       spin_lock_bh(&sk->sk_lock.slock);
-
-       if (!sock_owned_by_user(sk)) {
-               rc = filter_rcv(sk, skb);
-       } else {
-               if (sk->sk_backlog.len == 0)
-                       atomic_set(&tsk->dupl_rcvcnt, 0);
-               limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
-               if (sk_add_backlog(sk, skb, limit))
-                       rc = -TIPC_ERR_OVERLOAD;
-       }
-       spin_unlock_bh(&sk->sk_lock.slock);
-       tipc_sk_put(tsk);
-       if (likely(!rc))
-               return 0;
-exit:
-       if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
-               return -EHOSTUNREACH;
-
-       tipc_link_xmit_skb(skb, dnode, 0);
-       return (rc < 0) ? -EHOSTUNREACH : 0;
+       return err ? -EHOSTUNREACH : 0;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -2027,6 +2128,7 @@ exit:
 static int tipc_shutdown(struct socket *sock, int how)
 {
        struct sock *sk = sock->sk;
+       struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct sk_buff *skb;
        u32 dnode;
@@ -2049,21 +2151,24 @@ restart:
                                kfree_skb(skb);
                                goto restart;
                        }
-                       if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
-                               tipc_link_xmit_skb(skb, dnode, tsk->ref);
-                       tipc_node_remove_conn(dnode, tsk->ref);
+                       if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
+                                            TIPC_CONN_SHUTDOWN))
+                               tipc_link_xmit_skb(net, skb, dnode,
+                                                  tsk->portid);
+                       tipc_node_remove_conn(net, dnode, tsk->portid);
                } else {
                        dnode = tsk_peer_node(tsk);
+
                        skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                              TIPC_CONN_MSG, SHORT_H_SIZE,
-                                             0, dnode, tipc_own_addr,
+                                             0, dnode, tsk_own_node(tsk),
                                              tsk_peer_port(tsk),
-                                             tsk->ref, TIPC_CONN_SHUTDOWN);
-                       tipc_link_xmit_skb(skb, dnode, tsk->ref);
+                                             tsk->portid, TIPC_CONN_SHUTDOWN);
+                       tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
                }
                tsk->connected = 0;
                sock->state = SS_DISCONNECTING;
-               tipc_node_remove_conn(dnode, tsk->ref);
+               tipc_node_remove_conn(net, dnode, tsk->portid);
                /* fall through */
 
        case SS_DISCONNECTING:
@@ -2084,18 +2189,14 @@ restart:
        return res;
 }
 
-static void tipc_sk_timeout(unsigned long ref)
+static void tipc_sk_timeout(unsigned long data)
 {
-       struct tipc_sock *tsk;
-       struct sock *sk;
+       struct tipc_sock *tsk = (struct tipc_sock *)data;
+       struct sock *sk = &tsk->sk;
        struct sk_buff *skb = NULL;
        u32 peer_port, peer_node;
+       u32 own_node = tsk_own_node(tsk);
 
-       tsk = tipc_sk_get(ref);
-       if (!tsk)
-               return;
-
-       sk = &tsk->sk;
        bh_lock_sock(sk);
        if (!tsk->connected) {
                bh_unlock_sock(sk);
@@ -2106,38 +2207,39 @@ static void tipc_sk_timeout(unsigned long ref)
 
        if (tsk->probing_state == TIPC_CONN_PROBING) {
                /* Previous probe not answered -> self abort */
-               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
-                                     SHORT_H_SIZE, 0, tipc_own_addr,
-                                     peer_node, ref, peer_port,
-                                     TIPC_ERR_NO_PORT);
+               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+                                     TIPC_CONN_MSG, SHORT_H_SIZE, 0,
+                                     own_node, peer_node, tsk->portid,
+                                     peer_port, TIPC_ERR_NO_PORT);
        } else {
-               skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
-                                     0, peer_node, tipc_own_addr,
-                                     peer_port, ref, TIPC_OK);
+               skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
+                                     INT_H_SIZE, 0, peer_node, own_node,
+                                     peer_port, tsk->portid, TIPC_OK);
                tsk->probing_state = TIPC_CONN_PROBING;
-               k_start_timer(&tsk->timer, tsk->probing_interval);
+               sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
        }
        bh_unlock_sock(sk);
        if (skb)
-               tipc_link_xmit_skb(skb, peer_node, ref);
+               tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
 exit:
-       tipc_sk_put(tsk);
+       sock_put(sk);
 }
 
 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                           struct tipc_name_seq const *seq)
 {
+       struct net *net = sock_net(&tsk->sk);
        struct publication *publ;
        u32 key;
 
        if (tsk->connected)
                return -EINVAL;
-       key = tsk->ref + tsk->pub_count + 1;
-       if (key == tsk->ref)
+       key = tsk->portid + tsk->pub_count + 1;
+       if (key == tsk->portid)
                return -EADDRINUSE;
 
-       publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
-                                   scope, tsk->ref, key);
+       publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
+                                   scope, tsk->portid, key);
        if (unlikely(!publ))
                return -EINVAL;
 
@@ -2150,6 +2252,7 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                            struct tipc_name_seq const *seq)
 {
+       struct net *net = sock_net(&tsk->sk);
        struct publication *publ;
        struct publication *safe;
        int rc = -EINVAL;
@@ -2164,12 +2267,12 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                                continue;
                        if (publ->upper != seq->upper)
                                break;
-                       tipc_nametbl_withdraw(publ->type, publ->lower,
+                       tipc_nametbl_withdraw(net, publ->type, publ->lower,
                                              publ->ref, publ->key);
                        rc = 0;
                        break;
                }
-               tipc_nametbl_withdraw(publ->type, publ->lower,
+               tipc_nametbl_withdraw(net, publ->type, publ->lower,
                                      publ->ref, publ->key);
                rc = 0;
        }
@@ -2181,16 +2284,18 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
                        int len, int full_id)
 {
+       struct net *net = sock_net(&tsk->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct publication *publ;
        int ret;
 
        if (full_id)
                ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
-                                   tipc_zone(tipc_own_addr),
-                                   tipc_cluster(tipc_own_addr),
-                                   tipc_node(tipc_own_addr), tsk->ref);
+                                   tipc_zone(tn->own_addr),
+                                   tipc_cluster(tn->own_addr),
+                                   tipc_node(tn->own_addr), tsk->portid);
        else
-               ret = tipc_snprintf(buf, len, "%-10u:", tsk->ref);
+               ret = tipc_snprintf(buf, len, "%-10u:", tsk->portid);
 
        if (tsk->connected) {
                u32 dport = tsk_peer_port(tsk);
@@ -2222,15 +2327,18 @@ static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
        return ret;
 }
 
-struct sk_buff *tipc_sk_socks_show(void)
+struct sk_buff *tipc_sk_socks_show(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       const struct bucket_table *tbl;
+       struct rhash_head *pos;
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
        char *pb;
        int pb_len;
        struct tipc_sock *tsk;
        int str_len = 0;
-       u32 ref = 0;
+       int i;
 
        buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
        if (!buf)
@@ -2239,14 +2347,18 @@ struct sk_buff *tipc_sk_socks_show(void)
        pb = TLV_DATA(rep_tlv);
        pb_len = ULTRA_STRING_MAX_LEN;
 
-       tsk = tipc_sk_get_next(&ref);
-       for (; tsk; tsk = tipc_sk_get_next(&ref)) {
-               lock_sock(&tsk->sk);
-               str_len += tipc_sk_show(tsk, pb + str_len,
-                                       pb_len - str_len, 0);
-               release_sock(&tsk->sk);
-               tipc_sk_put(tsk);
+       rcu_read_lock();
+       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+       for (i = 0; i < tbl->size; i++) {
+               rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+                       spin_lock_bh(&tsk->sk.sk_lock.slock);
+                       str_len += tipc_sk_show(tsk, pb + str_len,
+                                               pb_len - str_len, 0);
+                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+               }
        }
+       rcu_read_unlock();
+
        str_len += 1;   /* for "\0" */
        skb_put(buf, TLV_SPACE(str_len));
        TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -2257,257 +2369,102 @@ struct sk_buff *tipc_sk_socks_show(void)
 /* tipc_sk_reinit: set non-zero address in all existing sockets
  *                 when we go from standalone to network mode.
  */
-void tipc_sk_reinit(void)
+void tipc_sk_reinit(struct net *net)
 {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       const struct bucket_table *tbl;
+       struct rhash_head *pos;
+       struct tipc_sock *tsk;
        struct tipc_msg *msg;
-       u32 ref = 0;
-       struct tipc_sock *tsk = tipc_sk_get_next(&ref);
+       int i;
 
-       for (; tsk; tsk = tipc_sk_get_next(&ref)) {
-               lock_sock(&tsk->sk);
-               msg = &tsk->phdr;
-               msg_set_prevnode(msg, tipc_own_addr);
-               msg_set_orignode(msg, tipc_own_addr);
-               release_sock(&tsk->sk);
-               tipc_sk_put(tsk);
+       rcu_read_lock();
+       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+       for (i = 0; i < tbl->size; i++) {
+               rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+                       spin_lock_bh(&tsk->sk.sk_lock.slock);
+                       msg = &tsk->phdr;
+                       msg_set_prevnode(msg, tn->own_addr);
+                       msg_set_orignode(msg, tn->own_addr);
+                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+               }
        }
+       rcu_read_unlock();
 }
 
-/**
- * struct reference - TIPC socket reference entry
- * @tsk: pointer to socket associated with reference entry
- * @ref: reference value for socket (combines instance & array index info)
- */
-struct reference {
-       struct tipc_sock *tsk;
-       u32 ref;
-};
-
-/**
- * struct tipc_ref_table - table of TIPC socket reference entries
- * @entries: pointer to array of reference entries
- * @capacity: array index of first unusable entry
- * @init_point: array index of first uninitialized entry
- * @first_free: array index of first unused socket reference entry
- * @last_free: array index of last unused socket reference entry
- * @index_mask: bitmask for array index portion of reference values
- * @start_mask: initial value for instance value portion of reference values
- */
-struct ref_table {
-       struct reference *entries;
-       u32 capacity;
-       u32 init_point;
-       u32 first_free;
-       u32 last_free;
-       u32 index_mask;
-       u32 start_mask;
-};
-
-/* Socket reference table consists of 2**N entries.
- *
- * State       Socket ptr      Reference
- * -----        ----------      ---------
- * In use        non-NULL       XXXX|own index
- *                             (XXXX changes each time entry is acquired)
- * Free            NULL         YYYY|next free index
- *                             (YYYY is one more than last used XXXX)
- * Uninitialized   NULL         0
- *
- * Entry 0 is not used; this allows index 0 to denote the end of the free list.
- *
- * Note that a reference value of 0 does not necessarily indicate that an
- * entry is uninitialized, since the last entry in the free list could also
- * have a reference value of 0 (although this is unlikely).
- */
-
-static struct ref_table tipc_ref_table;
-
-static DEFINE_RWLOCK(ref_table_lock);
-
-/**
- * tipc_ref_table_init - create reference table for sockets
- */
-int tipc_sk_ref_table_init(u32 req_sz, u32 start)
+static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
 {
-       struct reference *table;
-       u32 actual_sz;
-
-       /* account for unused entry, then round up size to a power of 2 */
-
-       req_sz++;
-       for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1) {
-               /* do nothing */
-       };
-
-       /* allocate table & mark all entries as uninitialized */
-       table = vzalloc(actual_sz * sizeof(struct reference));
-       if (table == NULL)
-               return -ENOMEM;
-
-       tipc_ref_table.entries = table;
-       tipc_ref_table.capacity = req_sz;
-       tipc_ref_table.init_point = 1;
-       tipc_ref_table.first_free = 0;
-       tipc_ref_table.last_free = 0;
-       tipc_ref_table.index_mask = actual_sz - 1;
-       tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_sock *tsk;
 
-       return 0;
-}
+       rcu_read_lock();
+       tsk = rhashtable_lookup(&tn->sk_rht, &portid);
+       if (tsk)
+               sock_hold(&tsk->sk);
+       rcu_read_unlock();
 
-/**
- * tipc_ref_table_stop - destroy reference table for sockets
- */
-void tipc_sk_ref_table_stop(void)
-{
-       if (!tipc_ref_table.entries)
-               return;
-       vfree(tipc_ref_table.entries);
-       tipc_ref_table.entries = NULL;
+       return tsk;
 }
 
-/* tipc_ref_acquire - create reference to a socket
- *
- * Register an socket pointer in the reference table.
- * Returns a unique reference value that is used from then on to retrieve the
- * socket pointer, or to determine if the socket has been deregistered.
- */
-u32 tipc_sk_ref_acquire(struct tipc_sock *tsk)
+static int tipc_sk_insert(struct tipc_sock *tsk)
 {
-       u32 index;
-       u32 index_mask;
-       u32 next_plus_upper;
-       u32 ref = 0;
-       struct reference *entry;
-
-       if (unlikely(!tsk)) {
-               pr_err("Attempt to acquire ref. to non-existent obj\n");
-               return 0;
-       }
-       if (unlikely(!tipc_ref_table.entries)) {
-               pr_err("Ref. table not found in acquisition attempt\n");
-               return 0;
-       }
-
-       /* Take a free entry, if available; otherwise initialize a new one */
-       write_lock_bh(&ref_table_lock);
-       index = tipc_ref_table.first_free;
-       entry = &tipc_ref_table.entries[index];
-
-       if (likely(index)) {
-               index = tipc_ref_table.first_free;
-               entry = &tipc_ref_table.entries[index];
-               index_mask = tipc_ref_table.index_mask;
-               next_plus_upper = entry->ref;
-               tipc_ref_table.first_free = next_plus_upper & index_mask;
-               ref = (next_plus_upper & ~index_mask) + index;
-               entry->tsk = tsk;
-       } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
-               index = tipc_ref_table.init_point++;
-               entry = &tipc_ref_table.entries[index];
-               ref = tipc_ref_table.start_mask + index;
+       struct sock *sk = &tsk->sk;
+       struct net *net = sock_net(sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
+       u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
+
+       while (remaining--) {
+               portid++;
+               if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
+                       portid = TIPC_MIN_PORT;
+               tsk->portid = portid;
+               sock_hold(&tsk->sk);
+               if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node))
+                       return 0;
+               sock_put(&tsk->sk);
        }
 
-       if (ref) {
-               entry->ref = ref;
-               entry->tsk = tsk;
-       }
-       write_unlock_bh(&ref_table_lock);
-       return ref;
+       return -1;
 }
 
-/* tipc_sk_ref_discard - invalidate reference to an socket
- *
- * Disallow future references to an socket and free up the entry for re-use.
- */
-void tipc_sk_ref_discard(u32 ref)
+static void tipc_sk_remove(struct tipc_sock *tsk)
 {
-       struct reference *entry;
-       u32 index;
-       u32 index_mask;
-
-       if (unlikely(!tipc_ref_table.entries)) {
-               pr_err("Ref. table not found during discard attempt\n");
-               return;
-       }
-
-       index_mask = tipc_ref_table.index_mask;
-       index = ref & index_mask;
-       entry = &tipc_ref_table.entries[index];
-
-       write_lock_bh(&ref_table_lock);
+       struct sock *sk = &tsk->sk;
+       struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
 
-       if (unlikely(!entry->tsk)) {
-               pr_err("Attempt to discard ref. to non-existent socket\n");
-               goto exit;
+       if (rhashtable_remove(&tn->sk_rht, &tsk->node)) {
+               WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+               __sock_put(sk);
        }
-       if (unlikely(entry->ref != ref)) {
-               pr_err("Attempt to discard non-existent reference\n");
-               goto exit;
-       }
-
-       /* Mark entry as unused; increment instance part of entry's
-        *   reference to invalidate any subsequent references
-        */
-
-       entry->tsk = NULL;
-       entry->ref = (ref & ~index_mask) + (index_mask + 1);
-
-       /* Append entry to free entry list */
-       if (unlikely(tipc_ref_table.first_free == 0))
-               tipc_ref_table.first_free = index;
-       else
-               tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
-       tipc_ref_table.last_free = index;
-exit:
-       write_unlock_bh(&ref_table_lock);
 }
 
-/* tipc_sk_get - find referenced socket and return pointer to it
- */
-struct tipc_sock *tipc_sk_get(u32 ref)
+int tipc_sk_rht_init(struct net *net)
 {
-       struct reference *entry;
-       struct tipc_sock *tsk;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct rhashtable_params rht_params = {
+               .nelem_hint = 192,
+               .head_offset = offsetof(struct tipc_sock, node),
+               .key_offset = offsetof(struct tipc_sock, portid),
+               .key_len = sizeof(u32), /* portid */
+               .hashfn = jhash,
+               .max_shift = 20, /* 1M */
+               .min_shift = 8,  /* 256 */
+               .grow_decision = rht_grow_above_75,
+               .shrink_decision = rht_shrink_below_30,
+       };
 
-       if (unlikely(!tipc_ref_table.entries))
-               return NULL;
-       read_lock_bh(&ref_table_lock);
-       entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
-       tsk = entry->tsk;
-       if (likely(tsk && (entry->ref == ref)))
-               sock_hold(&tsk->sk);
-       else
-               tsk = NULL;
-       read_unlock_bh(&ref_table_lock);
-       return tsk;
+       return rhashtable_init(&tn->sk_rht, &rht_params);
 }
 
-/* tipc_sk_get_next - lock & return next socket after referenced one
-*/
-struct tipc_sock *tipc_sk_get_next(u32 *ref)
+void tipc_sk_rht_destroy(struct net *net)
 {
-       struct reference *entry;
-       struct tipc_sock *tsk = NULL;
-       uint index = *ref & tipc_ref_table.index_mask;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       read_lock_bh(&ref_table_lock);
-       while (++index < tipc_ref_table.capacity) {
-               entry = &tipc_ref_table.entries[index];
-               if (!entry->tsk)
-                       continue;
-               tsk = entry->tsk;
-               sock_hold(&tsk->sk);
-               *ref = entry->ref;
-               break;
-       }
-       read_unlock_bh(&ref_table_lock);
-       return tsk;
-}
+       /* Wait for socket readers to complete */
+       synchronize_net();
 
-static void tipc_sk_put(struct tipc_sock *tsk)
-{
-       sock_put(&tsk->sk);
+       rhashtable_destroy(&tn->sk_rht);
 }
 
 /**
@@ -2639,8 +2596,9 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
        return put_user(sizeof(value), ol);
 }
 
-static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
+static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
+       struct sock *sk = sock->sk;
        struct tipc_sioc_ln_req lnr;
        void __user *argp = (void __user *)arg;
 
@@ -2648,7 +2606,8 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
        case SIOCGETLINKNAME:
                if (copy_from_user(&lnr, argp, sizeof(lnr)))
                        return -EFAULT;
-               if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer,
+               if (!tipc_node_get_linkname(sock_net(sk),
+                                           lnr.bearer_id & 0xffff, lnr.peer,
                                            lnr.linkname, TIPC_MAX_LINK_NAME)) {
                        if (copy_to_user(argp, &lnr, sizeof(lnr)))
                                return -EFAULT;
@@ -2820,6 +2779,8 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
        int err;
        void *hdr;
        struct nlattr *attrs;
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
@@ -2829,9 +2790,9 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
        attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
        if (!attrs)
                goto genlmsg_cancel;
-       if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref))
+       if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
                goto attr_msg_cancel;
-       if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr))
+       if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
                goto attr_msg_cancel;
 
        if (tsk->connected) {
@@ -2859,22 +2820,37 @@ int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int err;
        struct tipc_sock *tsk;
-       u32 prev_ref = cb->args[0];
-       u32 ref = prev_ref;
-
-       tsk = tipc_sk_get_next(&ref);
-       for (; tsk; tsk = tipc_sk_get_next(&ref)) {
-               lock_sock(&tsk->sk);
-               err = __tipc_nl_add_sk(skb, cb, tsk);
-               release_sock(&tsk->sk);
-               tipc_sk_put(tsk);
-               if (err)
-                       break;
+       const struct bucket_table *tbl;
+       struct rhash_head *pos;
+       struct net *net = sock_net(skb->sk);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       u32 tbl_id = cb->args[0];
+       u32 prev_portid = cb->args[1];
 
-               prev_ref = ref;
-       }
+       rcu_read_lock();
+       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+       for (; tbl_id < tbl->size; tbl_id++) {
+               rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
+                       spin_lock_bh(&tsk->sk.sk_lock.slock);
+                       if (prev_portid && prev_portid != tsk->portid) {
+                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
+                               continue;
+                       }
 
-       cb->args[0] = prev_ref;
+                       err = __tipc_nl_add_sk(skb, cb, tsk);
+                       if (err) {
+                               prev_portid = tsk->portid;
+                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
+                               goto out;
+                       }
+                       prev_portid = 0;
+                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+               }
+       }
+out:
+       rcu_read_unlock();
+       cb->args[0] = tbl_id;
+       cb->args[1] = prev_portid;
 
        return skb->len;
 }
@@ -2962,12 +2938,13 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int err;
-       u32 tsk_ref = cb->args[0];
+       u32 tsk_portid = cb->args[0];
        u32 last_publ = cb->args[1];
        u32 done = cb->args[2];
+       struct net *net = sock_net(skb->sk);
        struct tipc_sock *tsk;
 
-       if (!tsk_ref) {
+       if (!tsk_portid) {
                struct nlattr **attrs;
                struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
 
@@ -2984,13 +2961,13 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (!sock[TIPC_NLA_SOCK_REF])
                        return -EINVAL;
 
-               tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
+               tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
        }
 
        if (done)
                return 0;
 
-       tsk = tipc_sk_get(tsk_ref);
+       tsk = tipc_sk_lookup(net, tsk_portid);
        if (!tsk)
                return -EINVAL;
 
@@ -2999,9 +2976,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
        if (!err)
                done = 1;
        release_sock(&tsk->sk);
-       tipc_sk_put(tsk);
+       sock_put(&tsk->sk);
 
-       cb->args[0] = tsk_ref;
+       cb->args[0] = tsk_portid;
        cb->args[1] = last_publ;
        cb->args[2] = done;
 
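The replacement lookup path is RCU-based: readers resolve a portid to its tipc_sock through the per-namespace rhashtable and take a socket reference before leaving the read-side critical section. tipc_sk_lookup(), used further down in this diff, is not shown here; a minimal reconstruction under the pre-4.x rhashtable API (rhashtable_lookup() taking a bare key pointer, as configured by the params above) might look like:

    static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
    {
            struct tipc_net *tn = net_generic(net, tipc_net_id);
            struct tipc_sock *tsk;

            rcu_read_lock();
            /* hash the 4-byte portid with jhash, per rht_params above */
            tsk = rhashtable_lookup(&tn->sk_rht, &portid);
            if (tsk)
                    sock_hold(&tsk->sk);    /* pin before dropping RCU */
            rcu_read_unlock();

            return tsk;
    }

Callers pair this with sock_put(&tsk->sk), as tipc_nl_publ_dump() does below.
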
index d3408938700677e427b7ccbfe80a2dcf009f095f..8be0da7df8fc0c699c8e3778da6b62b79a64aab8 100644 (file)
@@ -1,6 +1,6 @@
 /* net/tipc/socket.h: Include file for TIPC socket code
  *
- * Copyright (c) 2014, Ericsson AB
+ * Copyright (c) 2014-2015, Ericsson AB
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define TIPC_FLOWCTRL_WIN        (TIPC_CONNACK_INTV * 2)
 #define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
                                  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
-int tipc_sk_rcv(struct sk_buff *buf);
-struct sk_buff *tipc_sk_socks_show(void);
-void tipc_sk_mcast_rcv(struct sk_buff *buf);
-void tipc_sk_reinit(void);
-int tipc_sk_ref_table_init(u32 requested_size, u32 start);
-void tipc_sk_ref_table_stop(void);
+int tipc_socket_init(void);
+void tipc_socket_stop(void);
+int tipc_sock_create_local(struct net *net, int type, struct socket **res);
+void tipc_sock_release_local(struct socket *sock);
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+                          int flags);
+int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+struct sk_buff *tipc_sk_socks_show(struct net *net);
+void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
+                      struct sk_buff_head *inputq);
+void tipc_sk_reinit(struct net *net);
+int tipc_sk_rht_init(struct net *net);
+void tipc_sk_rht_destroy(struct net *net);
 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
index 0344206b984f7f168a742726e8ad853940ff9cec..72c339e432aa6693a30957060b022bb2bc8f8785 100644 (file)
@@ -50,33 +50,6 @@ struct tipc_subscriber {
        struct list_head subscription_list;
 };
 
-static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
-                                 void *usr_data, void *buf, size_t len);
-static void *subscr_named_msg_event(int conid);
-static void subscr_conn_shutdown_event(int conid, void *usr_data);
-
-static atomic_t subscription_count = ATOMIC_INIT(0);
-
-static struct sockaddr_tipc topsrv_addr __read_mostly = {
-       .family                 = AF_TIPC,
-       .addrtype               = TIPC_ADDR_NAMESEQ,
-       .addr.nameseq.type      = TIPC_TOP_SRV,
-       .addr.nameseq.lower     = TIPC_TOP_SRV,
-       .addr.nameseq.upper     = TIPC_TOP_SRV,
-       .scope                  = TIPC_NODE_SCOPE
-};
-
-static struct tipc_server topsrv __read_mostly = {
-       .saddr                  = &topsrv_addr,
-       .imp                    = TIPC_CRITICAL_IMPORTANCE,
-       .type                   = SOCK_SEQPACKET,
-       .max_rcvbuf_size        = sizeof(struct tipc_subscr),
-       .name                   = "topology_server",
-       .tipc_conn_recvmsg      = subscr_conn_msg_event,
-       .tipc_conn_new          = subscr_named_msg_event,
-       .tipc_conn_shutdown     = subscr_conn_shutdown_event,
-};
-
 /**
  * htohl - convert value to endianness used by destination
  * @in: value to convert
@@ -93,6 +66,7 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
                              u32 found_upper, u32 event, u32 port_ref,
                              u32 node)
 {
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
        struct tipc_subscriber *subscriber = sub->subscriber;
        struct kvec msg_sect;
 
@@ -103,8 +77,8 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
        sub->evt.found_upper = htohl(found_upper, sub->swap);
        sub->evt.port.ref = htohl(port_ref, sub->swap);
        sub->evt.port.node = htohl(node, sub->swap);
-       tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
-                         msg_sect.iov_len);
+       tipc_conn_sendmsg(tn->topsrv, subscriber->conid, NULL,
+                         msg_sect.iov_base, msg_sect.iov_len);
 }
 
 /**
@@ -141,9 +115,11 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
        subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
 }
 
-static void subscr_timeout(struct tipc_subscription *sub)
+static void subscr_timeout(unsigned long data)
 {
+       struct tipc_subscription *sub = (struct tipc_subscription *)data;
        struct tipc_subscriber *subscriber = sub->subscriber;
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
 
        /* The spin lock per subscriber is used to protect its members */
        spin_lock_bh(&subscriber->lock);
@@ -167,9 +143,8 @@ static void subscr_timeout(struct tipc_subscription *sub)
                          TIPC_SUBSCR_TIMEOUT, 0, 0);
 
        /* Now destroy subscription */
-       k_term_timer(&sub->timer);
        kfree(sub);
-       atomic_dec(&subscription_count);
+       atomic_dec(&tn->subscription_count);
 }
 
 /**
@@ -179,10 +154,12 @@ static void subscr_timeout(struct tipc_subscription *sub)
  */
 static void subscr_del(struct tipc_subscription *sub)
 {
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+
        tipc_nametbl_unsubscribe(sub);
        list_del(&sub->subscription_list);
        kfree(sub);
-       atomic_dec(&subscription_count);
+       atomic_dec(&tn->subscription_count);
 }
 
 /**
@@ -190,9 +167,12 @@ static void subscr_del(struct tipc_subscription *sub)
  *
  * Note: Must call it in process context since it might sleep.
  */
-static void subscr_terminate(struct tipc_subscriber *subscriber)
+static void subscr_terminate(struct tipc_subscription *sub)
 {
-       tipc_conn_terminate(&topsrv, subscriber->conid);
+       struct tipc_subscriber *subscriber = sub->subscriber;
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+
+       tipc_conn_terminate(tn->topsrv, subscriber->conid);
 }
 
 static void subscr_release(struct tipc_subscriber *subscriber)
@@ -207,8 +187,7 @@ static void subscr_release(struct tipc_subscriber *subscriber)
                                 subscription_list) {
                if (sub->timeout != TIPC_WAIT_FOREVER) {
                        spin_unlock_bh(&subscriber->lock);
-                       k_cancel_timer(&sub->timer);
-                       k_term_timer(&sub->timer);
+                       del_timer_sync(&sub->timer);
                        spin_lock_bh(&subscriber->lock);
                }
                subscr_del(sub);
@@ -250,8 +229,7 @@ static void subscr_cancel(struct tipc_subscr *s,
        if (sub->timeout != TIPC_WAIT_FOREVER) {
                sub->timeout = TIPC_WAIT_FOREVER;
                spin_unlock_bh(&subscriber->lock);
-               k_cancel_timer(&sub->timer);
-               k_term_timer(&sub->timer);
+               del_timer_sync(&sub->timer);
                spin_lock_bh(&subscriber->lock);
        }
        subscr_del(sub);
@@ -262,9 +240,11 @@ static void subscr_cancel(struct tipc_subscr *s,
  *
  * Called with subscriber lock held.
  */
-static int subscr_subscribe(struct tipc_subscr *s,
+static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
                            struct tipc_subscriber *subscriber,
-                           struct tipc_subscription **sub_p) {
+                           struct tipc_subscription **sub_p)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_subscription *sub;
        int swap;
 
@@ -279,7 +259,7 @@ static int subscr_subscribe(struct tipc_subscr *s,
        }
 
        /* Refuse subscription if global limit exceeded */
-       if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
+       if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
                pr_warn("Subscription rejected, limit reached (%u)\n",
                        TIPC_MAX_SUBSCRIPTIONS);
                return -EINVAL;
@@ -293,10 +273,11 @@ static int subscr_subscribe(struct tipc_subscr *s,
        }
 
        /* Initialize subscription object */
+       sub->net = net;
        sub->seq.type = htohl(s->seq.type, swap);
        sub->seq.lower = htohl(s->seq.lower, swap);
        sub->seq.upper = htohl(s->seq.upper, swap);
-       sub->timeout = htohl(s->timeout, swap);
+       sub->timeout = msecs_to_jiffies(htohl(s->timeout, swap));
        sub->filter = htohl(s->filter, swap);
        if ((!(sub->filter & TIPC_SUB_PORTS) ==
             !(sub->filter & TIPC_SUB_SERVICE)) ||
@@ -309,11 +290,10 @@ static int subscr_subscribe(struct tipc_subscr *s,
        sub->subscriber = subscriber;
        sub->swap = swap;
        memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
-       atomic_inc(&subscription_count);
+       atomic_inc(&tn->subscription_count);
        if (sub->timeout != TIPC_WAIT_FOREVER) {
-               k_init_timer(&sub->timer,
-                            (Handler)subscr_timeout, (unsigned long)sub);
-               k_start_timer(&sub->timer, sub->timeout);
+               setup_timer(&sub->timer, subscr_timeout, (unsigned long)sub);
+               mod_timer(&sub->timer, jiffies + sub->timeout);
        }
        *sub_p = sub;
        return 0;
@@ -326,16 +306,18 @@ static void subscr_conn_shutdown_event(int conid, void *usr_data)
 }
 
 /* Handle one request to create a new subscription for the subscriber */
-static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
-                                 void *usr_data, void *buf, size_t len)
+static void subscr_conn_msg_event(struct net *net, int conid,
+                                 struct sockaddr_tipc *addr, void *usr_data,
+                                 void *buf, size_t len)
 {
        struct tipc_subscriber *subscriber = usr_data;
        struct tipc_subscription *sub = NULL;
 
        spin_lock_bh(&subscriber->lock);
-       if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
+       if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber,
+                            &sub) < 0) {
                spin_unlock_bh(&subscriber->lock);
-               subscr_terminate(subscriber);
+               subscr_terminate(sub);
                return;
        }
        if (sub)
@@ -343,7 +325,6 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
        spin_unlock_bh(&subscriber->lock);
 }
 
-
 /* Handle one request to establish a new subscriber */
 static void *subscr_named_msg_event(int conid)
 {
@@ -362,12 +343,50 @@ static void *subscr_named_msg_event(int conid)
        return (void *)subscriber;
 }
 
-int tipc_subscr_start(void)
+int tipc_subscr_start(struct net *net)
 {
-       return tipc_server_start(&topsrv);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       const char name[] = "topology_server";
+       struct tipc_server *topsrv;
+       struct sockaddr_tipc *saddr;
+
+       saddr = kzalloc(sizeof(*saddr), GFP_ATOMIC);
+       if (!saddr)
+               return -ENOMEM;
+       saddr->family                   = AF_TIPC;
+       saddr->addrtype                 = TIPC_ADDR_NAMESEQ;
+       saddr->addr.nameseq.type        = TIPC_TOP_SRV;
+       saddr->addr.nameseq.lower       = TIPC_TOP_SRV;
+       saddr->addr.nameseq.upper       = TIPC_TOP_SRV;
+       saddr->scope                    = TIPC_NODE_SCOPE;
+
+       topsrv = kzalloc(sizeof(*topsrv), GFP_ATOMIC);
+       if (!topsrv) {
+               kfree(saddr);
+               return -ENOMEM;
+       }
+       topsrv->net                     = net;
+       topsrv->saddr                   = saddr;
+       topsrv->imp                     = TIPC_CRITICAL_IMPORTANCE;
+       topsrv->type                    = SOCK_SEQPACKET;
+       topsrv->max_rcvbuf_size         = sizeof(struct tipc_subscr);
+       topsrv->tipc_conn_recvmsg       = subscr_conn_msg_event;
+       topsrv->tipc_conn_new           = subscr_named_msg_event;
+       topsrv->tipc_conn_shutdown      = subscr_conn_shutdown_event;
+
+       strncpy(topsrv->name, name, strlen(name) + 1);
+       tn->topsrv = topsrv;
+       atomic_set(&tn->subscription_count, 0);
+
+       return tipc_server_start(topsrv);
 }
 
-void tipc_subscr_stop(void)
+void tipc_subscr_stop(struct net *net)
 {
-       tipc_server_stop(&topsrv);
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_server *topsrv = tn->topsrv;
+
+       tipc_server_stop(topsrv);
+       kfree(topsrv->saddr);
+       kfree(topsrv);
 }
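
With the private k_*_timer() wrappers gone, subscription timers follow the stock timer_list lifecycle, and the timeout is converted to jiffies once at subscribe time. Condensed from the calls in this hunk:

    setup_timer(&sub->timer, subscr_timeout, (unsigned long)sub);
    mod_timer(&sub->timer, jiffies + sub->timeout);  /* timeout already in jiffies */

    /* teardown: subscr_timeout() takes subscriber->lock, so the lock is
     * dropped around the synchronous cancel to avoid deadlock */
    spin_unlock_bh(&subscriber->lock);
    del_timer_sync(&sub->timer);
    spin_lock_bh(&subscriber->lock);

This mirrors subscr_release() and subscr_cancel() above.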
index 393e417bee3f51f8703047cdf6ac6a2a6b8654be..33488bd9fe3c9b1ebd6fe0b5545afe551121b198 100644 (file)
@@ -39,6 +39,9 @@
 
 #include "server.h"
 
+#define TIPC_MAX_SUBSCRIPTIONS 65535
+#define TIPC_MAX_PUBLICATIONS  65535
+
 struct tipc_subscription;
 struct tipc_subscriber;
 
@@ -46,6 +49,7 @@ struct tipc_subscriber;
  * struct tipc_subscription - TIPC network topology subscription object
  * @subscriber: pointer to its subscriber
  * @seq: name sequence associated with subscription
+ * @net: pointer to network namespace
  * @timeout: duration of subscription (in ms)
  * @filter: event filtering to be done for subscription
  * @timer: timer governing subscription duration (optional)
@@ -58,7 +62,8 @@ struct tipc_subscriber;
 struct tipc_subscription {
        struct tipc_subscriber *subscriber;
        struct tipc_name_seq seq;
-       u32 timeout;
+       struct net *net;
+       unsigned long timeout;
        u32 filter;
        struct timer_list timer;
        struct list_head nameseq_list;
@@ -69,13 +74,10 @@ struct tipc_subscription {
 
 int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
                        u32 found_upper);
-
 void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
                                u32 found_upper, u32 event, u32 port_ref,
                                u32 node, int must);
-
-int tipc_subscr_start(void);
-
-void tipc_subscr_stop(void);
+int tipc_subscr_start(struct net *net);
+void tipc_subscr_stop(struct net *net);
 
 #endif
index 8e1b10274b02702345abba0b4b458d8319f2f841..526b6edab018eefde3c77f259c0a5caeff51a6bc 100644 (file)
@@ -1445,7 +1445,6 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
                              struct msghdr *msg, size_t len)
 {
-       struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
@@ -1456,14 +1455,12 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        unsigned int hash;
        struct sk_buff *skb;
        long timeo;
-       struct scm_cookie tmp_scm;
+       struct scm_cookie scm;
        int max_level;
        int data_len = 0;
 
-       if (NULL == siocb->scm)
-               siocb->scm = &tmp_scm;
        wait_for_unix_gc();
-       err = scm_send(sock, msg, siocb->scm, false);
+       err = scm_send(sock, msg, &scm, false);
        if (err < 0)
                return err;
 
@@ -1507,11 +1504,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (skb == NULL)
                goto out;
 
-       err = unix_scm_to_skb(siocb->scm, skb, true);
+       err = unix_scm_to_skb(&scm, skb, true);
        if (err < 0)
                goto out_free;
        max_level = err + 1;
-       unix_get_secdata(siocb->scm, skb);
+       unix_get_secdata(&scm, skb);
 
        skb_put(skb, len - data_len);
        skb->data_len = data_len;
@@ -1606,7 +1603,7 @@ restart:
        unix_state_unlock(other);
        other->sk_data_ready(other);
        sock_put(other);
-       scm_destroy(siocb->scm);
+       scm_destroy(&scm);
        return len;
 
 out_unlock:
@@ -1616,7 +1613,7 @@ out_free:
 out:
        if (other)
                sock_put(other);
-       scm_destroy(siocb->scm);
+       scm_destroy(&scm);
        return err;
 }
 
@@ -1628,21 +1625,18 @@ out:
 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                               struct msghdr *msg, size_t len)
 {
-       struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct sock *other = NULL;
        int err, size;
        struct sk_buff *skb;
        int sent = 0;
-       struct scm_cookie tmp_scm;
+       struct scm_cookie scm;
        bool fds_sent = false;
        int max_level;
        int data_len;
 
-       if (NULL == siocb->scm)
-               siocb->scm = &tmp_scm;
        wait_for_unix_gc();
-       err = scm_send(sock, msg, siocb->scm, false);
+       err = scm_send(sock, msg, &scm, false);
        if (err < 0)
                return err;
 
@@ -1683,7 +1677,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                        goto out_err;
 
                /* Only send the fds in the first buffer */
-               err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
+               err = unix_scm_to_skb(&scm, skb, !fds_sent);
                if (err < 0) {
                        kfree_skb(skb);
                        goto out_err;
@@ -1715,8 +1709,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                sent += size;
        }
 
-       scm_destroy(siocb->scm);
-       siocb->scm = NULL;
+       scm_destroy(&scm);
 
        return sent;
 
@@ -1728,8 +1721,7 @@ pipe_err:
                send_sig(SIGPIPE, current, 0);
        err = -EPIPE;
 out_err:
-       scm_destroy(siocb->scm);
-       siocb->scm = NULL;
+       scm_destroy(&scm);
        return sent ? : err;
 }
 
@@ -1778,8 +1770,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
                              struct msghdr *msg, size_t size,
                              int flags)
 {
-       struct sock_iocb *siocb = kiocb_to_siocb(iocb);
-       struct scm_cookie tmp_scm;
+       struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
        int noblock = flags & MSG_DONTWAIT;
@@ -1831,16 +1822,14 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (sock_flag(sk, SOCK_RCVTSTAMP))
                __sock_recv_timestamp(msg, sk, skb);
 
-       if (!siocb->scm) {
-               siocb->scm = &tmp_scm;
-               memset(&tmp_scm, 0, sizeof(tmp_scm));
-       }
-       scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
-       unix_set_secdata(siocb->scm, skb);
+       memset(&scm, 0, sizeof(scm));
+
+       scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
+       unix_set_secdata(&scm, skb);
 
        if (!(flags & MSG_PEEK)) {
                if (UNIXCB(skb).fp)
-                       unix_detach_fds(siocb->scm, skb);
+                       unix_detach_fds(&scm, skb);
 
                sk_peek_offset_bwd(sk, skb->len);
        } else {
@@ -1860,11 +1849,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
                sk_peek_offset_fwd(sk, size);
 
                if (UNIXCB(skb).fp)
-                       siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+                       scm.fp = scm_fp_dup(UNIXCB(skb).fp);
        }
        err = (flags & MSG_TRUNC) ? skb->len - skip : size;
 
-       scm_recv(sock, msg, siocb->scm, flags);
+       scm_recv(sock, msg, &scm, flags);
 
 out_free:
        skb_free_datagram(sk, skb);
@@ -1915,8 +1904,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t size,
                               int flags)
 {
-       struct sock_iocb *siocb = kiocb_to_siocb(iocb);
-       struct scm_cookie tmp_scm;
+       struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
@@ -1943,10 +1931,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
         * while sleeps in memcpy_tomsg
         */
 
-       if (!siocb->scm) {
-               siocb->scm = &tmp_scm;
-               memset(&tmp_scm, 0, sizeof(tmp_scm));
-       }
+       memset(&scm, 0, sizeof(scm));
 
        err = mutex_lock_interruptible(&u->readlock);
        if (unlikely(err)) {
@@ -2012,13 +1997,13 @@ again:
 
                if (check_creds) {
                        /* Never glue messages from different writers */
-                       if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
-                           !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
-                           !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
+                       if ((UNIXCB(skb).pid  != scm.pid) ||
+                           !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
+                           !gid_eq(UNIXCB(skb).gid, scm.creds.gid))
                                break;
                } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
                        /* Copy credentials */
-                       scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
+                       scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
                        check_creds = 1;
                }
 
@@ -2045,7 +2030,7 @@ again:
                        sk_peek_offset_bwd(sk, chunk);
 
                        if (UNIXCB(skb).fp)
-                               unix_detach_fds(siocb->scm, skb);
+                               unix_detach_fds(&scm, skb);
 
                        if (unix_skb_len(skb))
                                break;
@@ -2053,13 +2038,13 @@ again:
                        skb_unlink(skb, &sk->sk_receive_queue);
                        consume_skb(skb);
 
-                       if (siocb->scm->fp)
+                       if (scm.fp)
                                break;
                } else {
                        /* It is questionable, see note in unix_dgram_recvmsg.
                         */
                        if (UNIXCB(skb).fp)
-                               siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+                               scm.fp = scm_fp_dup(UNIXCB(skb).fp);
 
                        sk_peek_offset_fwd(sk, chunk);
 
@@ -2068,7 +2053,7 @@ again:
        } while (size);
 
        mutex_unlock(&u->readlock);
-       scm_recv(sock, msg, siocb->scm, flags);
+       scm_recv(sock, msg, &scm, flags);
 out:
        return copied ? : err;
 }
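
The af_unix paths now keep control-message state in an on-stack scm_cookie instead of reaching through the AIO control block. The resulting send-side shape, sketched from the calls in this hunk only:

    struct scm_cookie scm;
    int err;

    wait_for_unix_gc();
    err = scm_send(sock, msg, &scm, false);  /* collect creds/fds from msg */
    if (err < 0)
            return err;
    /* ... allocate skb, unix_scm_to_skb(&scm, skb, true), queue it ... */
    scm_destroy(&scm);                       /* released on every exit path */

The receive paths do the converse: memset the cookie, fill it from the skb, and hand it to scm_recv().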
index 86fa0f3b2cafa46d47db9cd03e46dfe89c22d238..ef542fbca9fe52dd85f6946e9d65ee8a28f6340e 100644 (file)
@@ -155,7 +155,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
        if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
                goto out_nlmsg_trim;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 out_nlmsg_trim:
        nlmsg_cancel(skb, nlh);
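
The same mechanical conversion recurs through the netlink fill functions below (nl80211, xfrm): message finalization no longer doubles as the return value, apparently tracking the core change that made nlmsg_end()/genlmsg_end() stop reporting a length. The new pattern is simply:

    nlmsg_end(skb, nlh);  /* close the message; cannot fail at this point */
    return 0;             /* errors still bail out via nlmsg_cancel() */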
index 02d2e5229240dd635dfc3eb7b6f81e8aa970bf01..7f3255084a6c036074664240f4da6fb3cdd231ce 100644 (file)
@@ -1850,8 +1850,7 @@ static ssize_t vmci_transport_stream_enqueue(
        struct msghdr *msg,
        size_t len)
 {
-       /* XXX: stripping const */
-       return vmci_qpair_enquev(vmci_trans(vsk)->qpair, (struct iovec *)msg->msg_iter.iov, len, 0);
+       return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
 }
 
 static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
index 22ba971741e5c998c5f314e083ceead4b2f6a94f..29c8675f9a1189db65c185f2ad04f96a67702989 100644 (file)
@@ -175,7 +175,7 @@ config CFG80211_INTERNAL_REGDB
          Most distributions have a CRDA package.  So if unsure, say N.
 
 config CFG80211_WEXT
-       bool
+       bool "cfg80211 wireless extensions compatibility"
        depends on CFG80211
        select WEXT_CORE
        help
index c5661c5ad8f32817def75431bd8901a09189ccfe..d78fd8b54515e630b67bf38d710b2b698f703c4c 100644 (file)
@@ -397,6 +397,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
        [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
        [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
+       [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
+       [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
 };
 
 /* policy for the key attributes */
@@ -1721,7 +1723,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                break;
        }
  finish:
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -2404,7 +2407,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
                        goto nla_put_failure;
        }
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -2869,6 +2873,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->get_key)
                return -EOPNOTSUPP;
 
+       if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+               return -ENOENT;
+
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -2888,10 +2895,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
                goto nla_put_failure;
 
-       if (pairwise && mac_addr &&
-           !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
-               return -ENOENT;
-
        err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
                           get_key_callback);
 
@@ -3062,7 +3065,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
        wdev_lock(dev->ieee80211_ptr);
        err = nl80211_key_allowed(dev->ieee80211_ptr);
 
-       if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
+       if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
            !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
                err = -ENOENT;
 
@@ -3838,7 +3841,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
                    sinfo->assoc_req_ies))
                goto nla_put_failure;
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -4568,7 +4572,8 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
 
        nla_nest_end(msg, pinfoattr);
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -5520,7 +5525,8 @@ static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb,
            nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG))
                goto nla_put_failure;
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
 nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -5774,7 +5780,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                request->ssids = (void *)&request->channels[n_channels];
        request->n_ssids = n_ssids;
        if (ie_len) {
-               if (request->ssids)
+               if (n_ssids)
                        request->ie = (void *)(request->ssids + n_ssids);
                else
                        request->ie = (void *)(request->channels + n_channels);
@@ -5830,7 +5836,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
        request->n_channels = i;
 
        i = 0;
-       if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
+       if (n_ssids) {
                nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
                        if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
                                err = -EINVAL;
@@ -6028,7 +6034,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
                request->ssids = (void *)&request->channels[n_channels];
        request->n_ssids = n_ssids;
        if (ie_len) {
-               if (request->ssids)
+               if (n_ssids)
                        request->ie = (void *)(request->ssids + n_ssids);
                else
                        request->ie = (void *)(request->channels + n_channels);
@@ -6037,7 +6043,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
        if (n_match_sets) {
                if (request->ie)
                        request->match_sets = (void *)(request->ie + ie_len);
-               else if (request->ssids)
+               else if (n_ssids)
                        request->match_sets =
                                (void *)(request->ssids + n_ssids);
                else
@@ -6096,7 +6102,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
        request->n_channels = i;
 
        i = 0;
-       if (attrs[NL80211_ATTR_SCAN_SSIDS]) {
+       if (n_ssids) {
                nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS],
                                    tmp) {
                        if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
@@ -6204,6 +6210,10 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
                }
        }
 
+       if (attrs[NL80211_ATTR_SCHED_SCAN_DELAY])
+               request->delay =
+                       nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
+
        request->interval = interval;
        request->scan_start = jiffies;
 
@@ -6590,7 +6600,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 
        nla_nest_end(msg, bss);
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
  fail_unlock_rcu:
        rcu_read_unlock();
@@ -6699,7 +6710,8 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
 
        nla_nest_end(msg, infoattr);
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -7762,14 +7774,19 @@ static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net *net;
        int err;
-       u32 pid;
 
-       if (!info->attrs[NL80211_ATTR_PID])
-               return -EINVAL;
+       if (info->attrs[NL80211_ATTR_PID]) {
+               u32 pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]);
+
+               net = get_net_ns_by_pid(pid);
+       } else if (info->attrs[NL80211_ATTR_NETNS_FD]) {
+               u32 fd = nla_get_u32(info->attrs[NL80211_ATTR_NETNS_FD]);
 
-       pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]);
+               net = get_net_ns_by_fd(fd);
+       } else {
+               return -EINVAL;
+       }
 
-       net = get_net_ns_by_pid(pid);
        if (IS_ERR(net))
                return PTR_ERR(net);
 
@@ -11038,7 +11055,8 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
        /* ignore errors and send incomplete event anyway */
        nl80211_add_scan_req(msg, rdev);
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -11061,7 +11079,8 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg,
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
                goto nla_put_failure;
 
-       return genlmsg_end(msg, hdr);
+       genlmsg_end(msg, hdr);
+       return 0;
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
index 886cc7cb5566d1502a8383a561c2c5eb3ee475f6..b586d0dcb09ebc9382fa0bd22016264e5bdd21c2 100644 (file)
@@ -1533,45 +1533,40 @@ static void reg_call_notifier(struct wiphy *wiphy,
 
 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-       struct ieee80211_channel *ch;
        struct cfg80211_chan_def chandef;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
-       bool ret = true;
+       enum nl80211_iftype iftype;
 
        wdev_lock(wdev);
+       iftype = wdev->iftype;
 
+       /* make sure the interface is active */
        if (!wdev->netdev || !netif_running(wdev->netdev))
-               goto out;
+               goto wdev_inactive_unlock;
 
-       switch (wdev->iftype) {
+       switch (iftype) {
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
                if (!wdev->beacon_interval)
-                       goto out;
-
-               ret = cfg80211_reg_can_beacon(wiphy,
-                                             &wdev->chandef, wdev->iftype);
+                       goto wdev_inactive_unlock;
+               chandef = wdev->chandef;
                break;
        case NL80211_IFTYPE_ADHOC:
                if (!wdev->ssid_len)
-                       goto out;
-
-               ret = cfg80211_reg_can_beacon(wiphy,
-                                             &wdev->chandef, wdev->iftype);
+                       goto wdev_inactive_unlock;
+               chandef = wdev->chandef;
                break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
                if (!wdev->current_bss ||
                    !wdev->current_bss->pub.channel)
-                       goto out;
+                       goto wdev_inactive_unlock;
 
-               ch = wdev->current_bss->pub.channel;
-               if (rdev->ops->get_channel &&
-                   !rdev_get_channel(rdev, wdev, &chandef))
-                       ret = cfg80211_chandef_usable(wiphy, &chandef,
-                                                     IEEE80211_CHAN_DISABLED);
-               else
-                       ret = !(ch->flags & IEEE80211_CHAN_DISABLED);
+               if (!rdev->ops->get_channel ||
+                   rdev_get_channel(rdev, wdev, &chandef))
+                       cfg80211_chandef_create(&chandef,
+                                               wdev->current_bss->pub.channel,
+                                               NL80211_CHAN_NO_HT);
                break;
        case NL80211_IFTYPE_MONITOR:
        case NL80211_IFTYPE_AP_VLAN:
@@ -1584,9 +1579,26 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
                break;
        }
 
-out:
        wdev_unlock(wdev);
-       return ret;
+
+       switch (iftype) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_ADHOC:
+               return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+               return cfg80211_chandef_usable(wiphy, &chandef,
+                                              IEEE80211_CHAN_DISABLED);
+       default:
+               break;
+       }
+
+       return true;
+
+wdev_inactive_unlock:
+       wdev_unlock(wdev);
+       return true;
 }
 
 static void reg_leave_invalid_chans(struct wiphy *wiphy)
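
The rework above is a snapshot-then-check refactor: everything that needs wdev_lock() (iftype, chandef, liveness checks) is copied out first, and the regulatory predicates run after wdev_unlock(). Stripped of the per-iftype branches, the control flow reduces to:

    wdev_lock(wdev);
    iftype = wdev->iftype;
    chandef = wdev->chandef;        /* snapshot under the lock */
    wdev_unlock(wdev);

    return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);  /* lock-free check */

This keeps cfg80211_reg_can_beacon()/cfg80211_chandef_usable() out of the wdev lock's critical section.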
index 3535e8ade48f2ee10bd5f544f130b36768114f33..6903dbdcb8c1f03bcef684ad1074e3dea9f18e18 100644 (file)
@@ -227,18 +227,32 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
        if (pairwise && !mac_addr)
                return -EINVAL;
 
-       /*
-        * Disallow pairwise keys with non-zero index unless it's WEP
-        * or a vendor specific cipher (because current deployments use
-        * pairwise WEP keys with non-zero indices and for vendor specific
-        * ciphers this should be validated in the driver or hardware level
-        * - but 802.11i clearly specifies to use zero)
-        */
-       if (pairwise && key_idx &&
-           ((params->cipher == WLAN_CIPHER_SUITE_TKIP) ||
-            (params->cipher == WLAN_CIPHER_SUITE_CCMP) ||
-            (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC)))
-               return -EINVAL;
+       switch (params->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+       case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_CCMP_256:
+       case WLAN_CIPHER_SUITE_GCMP:
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               /* Disallow pairwise keys with non-zero index unless it's WEP
+                * or a vendor specific cipher (because current deployments use
+                * pairwise WEP keys with non-zero indices and for vendor
+                * specific ciphers this should be validated in the driver or
+                * hardware level - but 802.11i clearly specifies to use zero)
+                */
+               if (pairwise && key_idx)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               /* Disallow BIP (group-only) cipher as pairwise cipher */
+               if (pairwise)
+                       return -EINVAL;
+               break;
+       default:
+               break;
+       }
 
        switch (params->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
@@ -253,6 +267,18 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
                if (params->key_len != WLAN_KEY_LEN_CCMP)
                        return -EINVAL;
                break;
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               if (params->key_len != WLAN_KEY_LEN_CCMP_256)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP:
+               if (params->key_len != WLAN_KEY_LEN_GCMP)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               if (params->key_len != WLAN_KEY_LEN_GCMP_256)
+                       return -EINVAL;
+               break;
        case WLAN_CIPHER_SUITE_WEP104:
                if (params->key_len != WLAN_KEY_LEN_WEP104)
                        return -EINVAL;
@@ -261,6 +287,18 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
                if (params->key_len != WLAN_KEY_LEN_AES_CMAC)
                        return -EINVAL;
                break;
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               if (params->key_len != WLAN_KEY_LEN_BIP_CMAC_256)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_128)
+                       return -EINVAL;
+               break;
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+               if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_256)
+                       return -EINVAL;
+               break;
        default:
                /*
                 * We don't know anything about this algorithm,
@@ -280,7 +318,13 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
                        return -EINVAL;
                case WLAN_CIPHER_SUITE_TKIP:
                case WLAN_CIPHER_SUITE_CCMP:
+               case WLAN_CIPHER_SUITE_CCMP_256:
+               case WLAN_CIPHER_SUITE_GCMP:
+               case WLAN_CIPHER_SUITE_GCMP_256:
                case WLAN_CIPHER_SUITE_AES_CMAC:
+               case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_256:
                        if (params->seq_len != 6)
                                return -EINVAL;
                        break;
@@ -308,6 +352,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
                goto out;
        }
 
+       if (ieee80211_is_mgmt(fc)) {
+               if (ieee80211_has_order(fc))
+                       hdrlen += IEEE80211_HT_CTL_LEN;
+               goto out;
+       }
+
        if (ieee80211_is_ctl(fc)) {
                /*
                 * ACK and CTS are 10 bytes, all others 16. To see how
@@ -708,8 +758,8 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
        if (skb->priority >= 256 && skb->priority <= 263)
                return skb->priority - 256;
 
-       if (vlan_tx_tag_present(skb)) {
-               vlan_priority = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK)
+       if (skb_vlan_tag_present(skb)) {
+               vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK)
                        >> VLAN_PRIO_SHIFT;
                if (vlan_priority > 0)
                        return vlan_priority;
index debe733386f859b9456db893c78b8a44e9fce14c..12e82a5e4ad5873e716014e421be8e329587e00c 100644 (file)
@@ -561,11 +561,6 @@ static struct xfrm_algo_desc calg_list[] = {
 },
 };
 
-static inline int aead_entries(void)
-{
-       return ARRAY_SIZE(aead_list);
-}
-
 static inline int aalg_entries(void)
 {
        return ARRAY_SIZE(aalg_list);
index 8128594ab3797e4814e77ef80246456641458027..7de2ed9ec46ddd003ef3db00554335a12f95cb89 100644 (file)
@@ -1019,7 +1019,8 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
                return err;
        }
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1121,7 +1122,8 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
                return err;
        }
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1842,7 +1844,8 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
        if (err)
                goto out_cancel;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 out_cancel:
        nlmsg_cancel(skb, nlh);
@@ -2282,7 +2285,8 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
                        goto out_cancel;
        }
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 
 out_cancel:
        nlmsg_cancel(skb, nlh);
@@ -2490,7 +2494,8 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
        if (err)
                return err;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2712,7 +2717,8 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
                return err;
        }
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
@@ -2827,7 +2833,8 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
        }
        upe->hard = !!hard;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2986,7 +2993,8 @@ static int build_report(struct sk_buff *skb, u8 proto,
                        return err;
                }
        }
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int xfrm_send_report(struct net *net, u8 proto,
@@ -3031,7 +3039,8 @@ static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
        um->old_sport = x->encap->encap_sport;
        um->reqid = x->props.reqid;
 
-       return nlmsg_end(skb, nlh);
+       nlmsg_end(skb, nlh);
+       return 0;
 }
 
 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
index e286b42307f30dfe840267be16c53b0580150905..6299ee95cd11b63112ae5b7875872cb72ca91208 100644 (file)
@@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data)
 
        /* iterate over two elements */
        assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
-              next_key == 2);
+              (next_key == 1 || next_key == 2));
        assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
-              next_key == 1);
+              (next_key == 1 || next_key == 2));
        assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
               errno == ENOENT);
 
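The relaxed assertions reflect that hash map iteration order is unspecified: either key may come back first. The bpf_get_next_key() walk the test relies on, in general form:

    /* visit every key once; order is unspecified for hash maps */
    while (bpf_get_next_key(map_fd, &key, &next_key) == 0)
            key = next_key;
    /* the walk ends with -1 and errno == ENOENT past the last element */
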
index 1bca180db8ad0b6475464f3ca8ea78c881c1b1c5..627f8cbbedb88ca29667bbf1f88eb2004d5ee461 100644 (file)
@@ -42,19 +42,19 @@ __clean-files       := $(extra-y) $(extra-m) $(extra-)       \
 
 __clean-files   := $(filter-out $(no-clean-files), $(__clean-files))
 
-# as clean-files is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# clean-files is given relative to the current directory, unless it
+# starts with $(objtree)/ (which means "./", so do not add "./" unless
+# you want to delete a file from the toplevel object directory).
 
 __clean-files   := $(wildcard                                               \
-                   $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \
-                  $(filter /%, $(__clean-files)))
+                  $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \
+                  $(filter $(objtree)/%, $(__clean-files)))
 
-# as clean-dirs is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# same as clean-files
 
 __clean-dirs    := $(wildcard                                               \
-                   $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs)))    \
-                  $(filter /%, $(clean-dirs)))
+                  $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs)))    \
+                  $(filter $(objtree)/%, $(clean-dirs)))
 
 # ==========================================================================
 
index 56ea99a12ab79c82e062b54cfd34d9a39ef6a867..537c38ca2e1c7008b2f0f98d4e172c714162eb79 100755 (executable)
@@ -255,7 +255,6 @@ if ($arch eq "x86_64") {
     # force flags for this arch
     $ld .= " -m shlelf_linux";
     $objcopy .= " -O elf32-sh-linux";
-    $cc .= " -m32";
 
 } elsif ($arch eq "powerpc") {
     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
index 9609a7f0faea2d53d2ca7c642cf24935012cdaae..c7952375ac5325cfb4c403fa1020671b5f31a150 100644 (file)
@@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                        atomic_dec(&key->user->nikeys);
 
-               key_user_put(key->user);
-
                /* now throw away the key memory */
                if (key->type->destroy)
                        key->type->destroy(key);
 
+               key_user_put(key->user);
+
                kfree(key->description);
 
 #ifdef KEY_DEBUGGING
index ec667f158f192c4467e020164ecd4adf30a5ce04..5d905d90d504c0d588b62bb856e7314e23488ec0 100644 (file)
@@ -81,36 +81,6 @@ struct snd_seq_dummy_port {
 
 static int my_client = -1;
 
-/*
- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
- * to subscribers.
- * Note: this callback is called only after all subscribers are removed.
- */
-static int
-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
-{
-       struct snd_seq_dummy_port *p;
-       int i;
-       struct snd_seq_event ev;
-
-       p = private_data;
-       memset(&ev, 0, sizeof(ev));
-       if (p->duplex)
-               ev.source.port = p->connect;
-       else
-               ev.source.port = p->port;
-       ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
-       ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
-       for (i = 0; i < 16; i++) {
-               ev.data.control.channel = i;
-               ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-               ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-       }
-       return 0;
-}
-
 /*
  * event input callback - just redirect events to subscribers
  */
@@ -175,7 +145,6 @@ create_port(int idx, int type)
                | SNDRV_SEQ_PORT_TYPE_PORT;
        memset(&pcb, 0, sizeof(pcb));
        pcb.owner = THIS_MODULE;
-       pcb.unuse = dummy_unuse;
        pcb.event_input = dummy_input;
        pcb.private_free = dummy_free;
        pcb.private_data = rec;
index 3badc70124ab19d9b2e6115198e88c46718676e9..0d580186ef1ac379bcd2cb699ac2f33baeac9029 100644 (file)
 #define CYCLES_PER_SECOND      8000
 #define TICKS_PER_SECOND       (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
 
-#define TRANSFER_DELAY_TICKS   0x2e00 /* 479.17 µs */
+/*
+ * Nominally 3125 bytes/second, but the MIDI port's clock might be
+ * 1% too slow, and the bus clock 100 ppm too fast.
+ */
+#define MIDI_BYTES_PER_SECOND  3093
+
+/*
+ * Several devices look only at the first eight data blocks.
+ * In any case, this is more than enough for the MIDI data rate.
+ */
+#define MAX_MIDI_RX_BLOCKS     8
+
+#define TRANSFER_DELAY_TICKS   0x2e00 /* 479.17 µs */
 
 /* isochronous header parameters */
 #define ISO_DATA_LENGTH_SHIFT  16
@@ -78,8 +90,6 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
        s->callbacked = false;
        s->sync_slave = NULL;
 
-       s->rx_blocks_for_midi = UINT_MAX;
-
        return 0;
 }
 EXPORT_SYMBOL(amdtp_stream_init);
@@ -222,6 +232,14 @@ sfc_found:
        for (i = 0; i < pcm_channels; i++)
                s->pcm_positions[i] = i;
        s->midi_position = s->pcm_channels;
+
+       /*
+        * We do not know the actual MIDI FIFO size of most devices.  Just
+        * assume two bytes, i.e., one byte can be received over the bus while
+        * the previous one is transmitted over MIDI.
+        * (The value here is adjusted for midi_ratelimit_per_packet().)
+        */
+       s->midi_fifo_limit = rate - MIDI_BYTES_PER_SECOND * s->syt_interval + 1;
 }
 EXPORT_SYMBOL(amdtp_stream_set_parameters);
 
@@ -463,6 +481,36 @@ static void amdtp_fill_pcm_silence(struct amdtp_stream *s,
        }
 }
 
+/*
+ * To avoid sending MIDI bytes at too high a rate, assume that the receiving
+ * device has a FIFO, and track how much it is filled.  This value increases
+ * by one whenever we send one byte in a packet, but the FIFO empties at
+ * a constant rate independent of our packet rate.  One packet has syt_interval
+ * samples, so the number of bytes that empty out of the FIFO, per packet(!),
+ * is MIDI_BYTES_PER_SECOND * syt_interval / sample_rate.  To avoid storing
+ * fractional values, the values in midi_fifo_used[] are measured in bytes
+ * multiplied by the sample rate.
+ */
+static bool midi_ratelimit_per_packet(struct amdtp_stream *s, unsigned int port)
+{
+       int used;
+
+       used = s->midi_fifo_used[port];
+       if (used == 0) /* common shortcut */
+               return true;
+
+       used -= MIDI_BYTES_PER_SECOND * s->syt_interval;
+       used = max(used, 0);
+       s->midi_fifo_used[port] = used;
+
+       return used < s->midi_fifo_limit;
+}
+
+static void midi_rate_use_one_byte(struct amdtp_stream *s, unsigned int port)
+{
+       s->midi_fifo_used[port] += amdtp_rate_table[s->sfc];
+}
+
 static void amdtp_fill_midi(struct amdtp_stream *s,
                            __be32 *buffer, unsigned int frames)
 {
@@ -470,16 +518,21 @@ static void amdtp_fill_midi(struct amdtp_stream *s,
        u8 *b;
 
        for (f = 0; f < frames; f++) {
-               buffer[s->midi_position] = 0;
                b = (u8 *)&buffer[s->midi_position];
 
                port = (s->data_block_counter + f) % 8;
-               if ((f >= s->rx_blocks_for_midi) ||
-                   (s->midi[port] == NULL) ||
-                   (snd_rawmidi_transmit(s->midi[port], b + 1, 1) <= 0))
-                       b[0] = 0x80;
-               else
+               if (f < MAX_MIDI_RX_BLOCKS &&
+                   midi_ratelimit_per_packet(s, port) &&
+                   s->midi[port] != NULL &&
+                   snd_rawmidi_transmit(s->midi[port], &b[1], 1) == 1) {
+                       midi_rate_use_one_byte(s, port);
                        b[0] = 0x81;
+               } else {
+                       b[0] = 0x80;
+                       b[1] = 0;
+               }
+               b[2] = 0;
+               b[3] = 0;
 
                buffer += s->data_block_quadlets;
        }
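
A worked instance of the fixed-point bookkeeping above, assuming a 48 kHz stream where syt_interval is 8 (an illustrative choice, not stated in this hunk): each packet drains MIDI_BYTES_PER_SECOND * syt_interval = 3093 * 8 = 24744 scaled units from midi_fifo_used[], while one transmitted byte adds rate = 48000 units. In the steady state a byte therefore goes out about every 48000 / 24744 ≈ 1.94 packets, and at 48000 / 8 = 6000 packets per second that converges on 6000 / 1.94 ≈ 3093 bytes per second, the intended cap.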
index e6e8926275b0542c9891373e2d3d473cbd682286..8a03a91e728b0f9bc4feb23a4bdd9f22fdf5952d 100644 (file)
@@ -148,13 +148,12 @@ struct amdtp_stream {
        bool double_pcm_frames;
 
        struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8];
+       int midi_fifo_limit;
+       int midi_fifo_used[AMDTP_MAX_CHANNELS_FOR_MIDI * 8];
 
        /* quirk: fixed interval of dbc between previous/current packets. */
        unsigned int tx_dbc_interval;
 
-       /* quirk: the first count of data blocks in an rx packet for MIDI */
-       unsigned int rx_blocks_for_midi;
-
        bool callbacked;
        wait_queue_head_t callback_wait;
        struct amdtp_stream *sync_slave;
index 1aab0a32870c84d20a0c5b87f46d625299b34fd4..0ebcabfdc7ce0162c9a77ed30ca038e6588cae63 100644 (file)
@@ -484,13 +484,6 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
                amdtp_stream_destroy(&bebob->rx_stream);
                destroy_both_connections(bebob);
        }
-       /*
-        * The firmware for these devices ignore MIDI messages in more than
-        * first 8 data blocks of an received AMDTP packet.
-        */
-       if (bebob->spec == &maudio_fw410_spec ||
-           bebob->spec == &maudio_special_spec)
-               bebob->rx_stream.rx_blocks_for_midi = 8;
 end:
        return err;
 }
index b985fc5ebdc6b9cc490e41695d76b579a7b3fe3d..4f440e16366780f097d8c03daeb45e01fb5e7eb0 100644 (file)
@@ -179,11 +179,6 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
                destroy_stream(efw, &efw->tx_stream);
                goto end;
        }
-       /*
-        * Fireworks ignores MIDI messages in more than first 8 data
-        * blocks of an received AMDTP packet.
-        */
-       efw->rx_stream.rx_blocks_for_midi = 8;
 
        /* set IEC61883 compliant mode (actually not fully compliant...) */
        err = snd_efw_command_set_tx_mode(efw, SND_EFW_TRANSPORT_MODE_IEC61883);
index 255dabc6fc3313debc4b943d02e80a15b9e7f70a..2a85e4209f0b74d6f169dd705ffbf6fc9a2d3402 100644 (file)
@@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
        spin_lock_irq(&efw->lock);
 
        t = (struct snd_efw_transaction *)data;
-       length = min_t(size_t, t->length * sizeof(t->length), length);
+       length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
 
        if (efw->push_ptr < efw->pull_ptr)
                capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
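
The old expression fed the big-endian t->length field straight into the
multiplication; on a little-endian host that inflates the byte count by
orders of magnitude. A standalone sketch with a hypothetical value,
using the userspace ntohl()/htonl() helpers in place of the kernel's
be32_to_cpu():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t length = htonl(6);	/* 6 quadlets, big-endian on the wire */

	/* raw use on little-endian: 0x06000000 * 4 = 402653184 bytes */
	printf("raw:       %u bytes\n", length * (unsigned)sizeof(uint32_t));
	/* converted first, as the fix does: 24 bytes */
	printf("converted: %u bytes\n",
	       ntohl(length) * (unsigned)sizeof(uint32_t));
	return 0;
}
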
index 5f13d2d180791fb4cd674ee52ffcdb84ffc9c2d3..b422e406a9cb3ba284772d4fbd6d42854de40b95 100644 (file)
@@ -3353,6 +3353,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
+{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3413,6 +3414,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
 MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
+MODULE_ALIAS("snd-hda-codec-id:10de0072");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
index 4f6413e01c133567a2c01ccee10543d6fc82a864..605d14003d257cb645b3519541d18a9b3045790a 100644 (file)
@@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec)
                        spec->gpio_mask;
        }
        if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
-               spec->gpio_mask &= spec->gpio_mask;
-       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
                spec->gpio_dir &= spec->gpio_mask;
+       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+               spec->gpio_data &= spec->gpio_mask;
        if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
                spec->eapd_mask &= spec->gpio_mask;
        if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
index 7752860f7230e2821ac906715c06007543e84499..4c23381727a1ed1c53406b596ff04d9cd2e68577 100644 (file)
@@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev)
        if (ret)
                goto err_clk_disable;
 
+       return 0;
+
 err_clk_disable:
        clk_disable_unprepare(i2s->clk);
        return ret;
index e5f2fb884bf34688bc42add58e0b93cc5b863a9b..30c673cdc12ed409a720836d139cd48b7ed5d134 100644 (file)
@@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
 static const char * const pcm512x_dsp_program_texts[] = {
        "FIR interpolation with de-emphasis",
        "Low latency IIR with de-emphasis",
-       "Fixed process flow",
        "High attenuation with de-emphasis",
+       "Fixed process flow",
        "Ringing-less low latency FIR",
 };
 
index 2cd4fe463102d4532458e1ed3402abd276be8fc1..1d1c7f8a9af27329a10dacf22c8752093dba91f8 100644 (file)
@@ -861,10 +861,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream,
                RT286_I2S_CTRL1, 0x0018, d_len_code << 3);
        dev_dbg(codec->dev, "format val = 0x%x\n", val);
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
-       else
-               snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
+       snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
+       snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
 
        return 0;
 }
index 81fe1464d2686661047aad6f2b9334f5d6a65c22..918ada9738b0431e9d47d0dc5155cd42d9e2fb2a 100644 (file)
@@ -784,8 +784,8 @@ static unsigned int bst_tlv[] = {
 static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
 
        ucontrol->value.integer.value[0] = rt5677->dsp_vad_en;
 
@@ -795,8 +795,9 @@ static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
 static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+       struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
 
        rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0];
 
@@ -2082,10 +2083,14 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w,
        struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
        switch (event) {
-       case SND_SOC_DAPM_POST_PMU:
+       case SND_SOC_DAPM_PRE_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2);
+               break;
+
+       case SND_SOC_DAPM_POST_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0);
                break;
+
        default:
                return 0;
        }
@@ -2100,10 +2105,14 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w,
        struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
        switch (event) {
-       case SND_SOC_DAPM_POST_PMU:
+       case SND_SOC_DAPM_PRE_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2);
+               break;
+
+       case SND_SOC_DAPM_POST_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0);
                break;
+
        default:
                return 0;
        }
@@ -2211,9 +2220,11 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
 
 static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
-               0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU),
+               0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
+               SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT,
-               0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU),
+               0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU |
+               SND_SOC_DAPM_POST_PMU),
 
        /* Input Side */
        /* micbias */
index 1d1205702d2324bcef0b41d9682a49549bc4f6e0..9f2dced046de4b0ade76fcb8f088237f9443a643 100644 (file)
@@ -254,6 +254,7 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
        struct ts3a227e *ts3a227e;
        struct device *dev = &i2c->dev;
        int ret;
+       unsigned int acc_reg;
 
        ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL);
        if (ts3a227e == NULL)
@@ -283,6 +284,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
                           INTB_DISABLE | ADC_COMPLETE_INT_DISABLE,
                           ADC_COMPLETE_INT_DISABLE);
 
+       /* Read jack status because the chip might not trigger an interrupt at boot. */
+       regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
+       ts3a227e_new_jack_state(ts3a227e, acc_reg);
+       ts3a227e_jack_report(ts3a227e);
+
        return 0;
 }
 
index 4d2d2b1380d59d2b8e1043acbdd29d314eef1b97..75b87c5c0f046286f41ceace869091f6323d9f4c 100644 (file)
@@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
        { "Right Capture PGA", NULL, "Right Capture Mux" },
        { "Right Capture PGA", NULL, "Right Capture Inverting Mux" },
 
-       { "AIFOUTL", "Left",  "ADCL" },
-       { "AIFOUTL", "Right", "ADCR" },
-       { "AIFOUTR", "Left",  "ADCL" },
-       { "AIFOUTR", "Right", "ADCR" },
+       { "AIFOUTL Mux", "Left", "ADCL" },
+       { "AIFOUTL Mux", "Right", "ADCR" },
+       { "AIFOUTR Mux", "Left", "ADCL" },
+       { "AIFOUTR Mux", "Right", "ADCR" },
+
+       { "AIFOUTL", NULL, "AIFOUTL Mux" },
+       { "AIFOUTR", NULL, "AIFOUTR Mux" },
 
        { "ADCL", NULL, "CLK_DSP" },
        { "ADCL", NULL, "Left Capture PGA" },
@@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
 };
 
 static const struct snd_soc_dapm_route dac_intercon[] = {
-       { "DACL", "Right", "AIFINR" },
-       { "DACL", "Left",  "AIFINL" },
+       { "DACL Mux", "Left", "AIFINL" },
+       { "DACL Mux", "Right", "AIFINR" },
+
+       { "DACR Mux", "Left", "AIFINL" },
+       { "DACR Mux", "Right", "AIFINR" },
+
+       { "DACL", NULL, "DACL Mux" },
        { "DACL", NULL, "CLK_DSP" },
 
-       { "DACR", "Right", "AIFINR" },
-       { "DACR", "Left",  "AIFINL" },
+       { "DACR", NULL, "DACR Mux" },
        { "DACR", NULL, "CLK_DSP" },
 
        { "Charge pump", NULL, "SYSCLK" },
index 031a1ae71d943f2782d08f911d24c79d38d078ab..a96eb497a3796e9b67b539eae255a316af52864b 100644 (file)
@@ -556,7 +556,7 @@ static struct {
        { 22050, 2 },
        { 24000, 2 },
        { 16000, 3 },
-       { 11250, 4 },
+       { 11025, 4 },
        { 12000, 4 },
        {  8000, 5 },
 };
index b93168d4f6489ee667d94d89a7271e6b4eba8ad5..8d18bbda661b66412dba2b93246bc7069cc61c80 100644 (file)
@@ -209,16 +209,9 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
 
        switch (config->chan_nr) {
        case EIGHT_CHANNEL_SUPPORT:
-               ch_reg = 3;
-               break;
        case SIX_CHANNEL_SUPPORT:
-               ch_reg = 2;
-               break;
        case FOUR_CHANNEL_SUPPORT:
-               ch_reg = 1;
-               break;
        case TWO_CHANNEL_SUPPORT:
-               ch_reg = 0;
                break;
        default:
                dev_err(dev->dev, "channel not supported\n");
@@ -227,18 +220,22 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
 
        i2s_disable_channels(dev, substream->stream);
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               i2s_write_reg(dev->i2s_base, TCR(ch_reg), xfer_resolution);
-               i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
-               irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
-               i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
-               i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
-       } else {
-               i2s_write_reg(dev->i2s_base, RCR(ch_reg), xfer_resolution);
-               i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
-               irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
-               i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
-               i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+       for (ch_reg = 0; ch_reg < (config->chan_nr / 2); ch_reg++) {
+               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+                       i2s_write_reg(dev->i2s_base, TCR(ch_reg),
+                                     xfer_resolution);
+                       i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
+                       irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+                       i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
+                       i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
+               } else {
+                       i2s_write_reg(dev->i2s_base, RCR(ch_reg),
+                                     xfer_resolution);
+                       i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
+                       irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+                       i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
+                       i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+               }
        }
 
        i2s_write_reg(dev->i2s_base, CCR, ccr);
@@ -263,6 +260,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
        snd_soc_dai_set_dma_data(dai, substream, NULL);
 }
 
+static int dw_i2s_prepare(struct snd_pcm_substream *substream,
+                         struct snd_soc_dai *dai)
+{
+       struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               i2s_write_reg(dev->i2s_base, TXFFR, 1);
+       else
+               i2s_write_reg(dev->i2s_base, RXFFR, 1);
+
+       return 0;
+}
+
 static int dw_i2s_trigger(struct snd_pcm_substream *substream,
                int cmd, struct snd_soc_dai *dai)
 {
@@ -294,6 +304,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
        .startup        = dw_i2s_startup,
        .shutdown       = dw_i2s_shutdown,
        .hw_params      = dw_i2s_hw_params,
+       .prepare        = dw_i2s_prepare,
        .trigger        = dw_i2s_trigger,
 };
 
index 91a550f4a10dc7e0156efd8389fc3ba90bb15955..5e793bbb6b02be5f4a628f52755c09564c4a72fd 100644 (file)
 #define ESAI_xCCR_xFP_MASK     (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
 #define ESAI_xCCR_xFP(v)       ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
 #define ESAI_xCCR_xDC_SHIFT     9
-#define ESAI_xCCR_xDC_WIDTH    4
+#define ESAI_xCCR_xDC_WIDTH    5
 #define ESAI_xCCR_xDC_MASK     (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
 #define ESAI_xCCR_xDC(v)       ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
 #define ESAI_xCCR_xPSR_SHIFT   8
index a65f17d57ffb44733b7858364678581308dea040..059496ed9ad76c4771ccafbe593a1e0c971f9420 100644 (file)
@@ -1362,9 +1362,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        }
 
        ssi_private->irq = platform_get_irq(pdev, 0);
-       if (!ssi_private->irq) {
+       if (ssi_private->irq < 0) {
                dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
-               return -ENXIO;
+               return ssi_private->irq;
        }
 
        /* Are the RX and the TX clocks locked? */
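
The check above follows the convention that platform_get_irq() reports
failure with a negative errno rather than 0, so '!irq' never fires. A
standalone sketch with a stand-in helper (fake_get_irq() is
hypothetical, not a kernel API):

#include <errno.h>
#include <stdio.h>

/* stand-in: returns a resource number, or a negative errno on failure */
static int fake_get_irq(int fail)
{
	return fail ? -ENXIO : 42;
}

int main(void)
{
	int irq = fake_get_irq(1);

	if (!irq)	/* old check: never true for -ENXIO */
		printf("old check caught the failure\n");
	if (irq < 0)	/* fixed check: propagate the errno */
		printf("fixed check caught errno %d\n", irq);
	return 0;
}
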
index 4caacb05a62324e404811e45768ec17eed0973ea..cd146d4fa8054fb0980abdda8a9fc506c1c4ef98 100644 (file)
@@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
        if (ret)
                goto clk_fail;
        data->card.num_links = 1;
+       data->card.owner = THIS_MODULE;
        data->card.dai_link = &data->dai;
        data->card.dapm_widgets = imx_wm8962_dapm_widgets;
        data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
index fb9240fdc9b70095d364e3e90e14c16653ca4aff..7fe3009b1c43c63c055c4f9ea8f2b25376ad5c7b 100644 (file)
@@ -452,9 +452,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,
 }
 
 /* Decrease the reference count of the device nodes */
-static int asoc_simple_card_unref(struct platform_device *pdev)
+static int asoc_simple_card_unref(struct snd_soc_card *card)
 {
-       struct snd_soc_card *card = platform_get_drvdata(pdev);
        struct snd_soc_dai_link *dai_link;
        int num_links;
 
@@ -556,7 +555,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
                return ret;
 
 err:
-       asoc_simple_card_unref(pdev);
+       asoc_simple_card_unref(&priv->snd_card);
        return ret;
 }
 
@@ -572,7 +571,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
                snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
                                        &simple_card_mic_jack_gpio);
 
-       return asoc_simple_card_unref(pdev);
+       return asoc_simple_card_unref(card);
 }
 
 static const struct of_device_id asoc_simple_of_match[] = {
index e989ecf046c953a7ad79e7c464123909059a7f2a..f86de1211b966c300fd43411254e8fdcb994297f 100644 (file)
@@ -89,7 +89,7 @@ config SND_SOC_INTEL_BROADWELL_MACH
 
 config SND_SOC_INTEL_BYTCR_RT5640_MACH
        tristate "ASoC Audio DSP Support for MID BYT Platform"
-       depends on X86
+       depends on X86 && I2C
        select SND_SOC_RT5640
        select SND_SST_MFLD_PLATFORM
        select SND_SST_IPC_ACPI
@@ -101,7 +101,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH
 
 config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
         tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
-        depends on X86_INTEL_LPSS
+        depends on X86_INTEL_LPSS && I2C
         select SND_SOC_RT5670
         select SND_SST_MFLD_PLATFORM
         select SND_SST_IPC_ACPI
index f5d0fc1ab10c1efea603c568219b76735fc45db0..eef0c56ec32e8d8733c9780b5dc5d07ab56fc90f 100644 (file)
@@ -227,4 +227,4 @@ module_platform_driver(snd_byt_mc_driver);
 MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver");
 MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:bytrt5640-audio");
+MODULE_ALIAS("platform:bytt100_rt5640");
index 4a5bde9c686be2bbe967fcdd899f7c10f52a74c1..b3f9489794a6acad776fc2cf7670b6e12ab06bf6 100644 (file)
@@ -706,6 +706,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
        struct list_head *block_list)
 {
        struct sst_mem_block *block, *tmp;
+       struct sst_block_allocator ba_tmp = *ba;
        u32 end = ba->offset + ba->size, block_end;
        int err;
 
@@ -730,9 +731,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
                if (ba->offset >= block->offset && ba->offset < block_end) {
 
                        /* align ba to block boundary */
-                       ba->size -= block_end - ba->offset;
-                       ba->offset = block_end;
-                       err = block_alloc_contiguous(dsp, ba, block_list);
+                       ba_tmp.size -= block_end - ba->offset;
+                       ba_tmp.offset = block_end;
+                       err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;
 
@@ -763,10 +764,14 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
                /* does block span more than 1 section */
                if (ba->offset >= block->offset && ba->offset < block_end) {
 
+                       /* add block */
+                       list_move(&block->list, &dsp->used_block_list);
+                       list_add(&block->module_list, block_list);
                        /* align ba to block boundary */
-                       ba->offset = block->offset;
+                       ba_tmp.size -= block_end - ba->offset;
+                       ba_tmp.offset = block_end;
 
-                       err = block_alloc_contiguous(dsp, ba, block_list);
+                       err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;
 
index 3f8c48231364c6c7b9510da4eb9d73c6e291cace..5bf14040c24a27f33b41dd1f0ee97a2c8d09779b 100644 (file)
@@ -1228,6 +1228,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
        struct sst_dsp *sst = hsw->dsp;
        unsigned long flags;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n");
+               return 0;
+       }
+
        /* don't free DSP streams that are not committed */
        if (!stream->commited)
                goto out;
@@ -1415,6 +1420,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
        u32 header;
        int ret;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n");
+               return 0;
+       }
+
+       if (stream->commited) {
+               dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n");
+               return 0;
+       }
+
        trace_ipc_request("stream alloc", stream->host_id);
 
        header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM);
@@ -1519,6 +1534,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
 {
        int ret;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n");
+               return 0;
+       }
+
        trace_ipc_request("stream pause", stream->reply.stream_hw_id);
 
        ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE,
@@ -1535,6 +1555,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
 {
        int ret;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n");
+               return 0;
+       }
+
        trace_ipc_request("stream resume", stream->reply.stream_hw_id);
 
        ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME,
@@ -1550,6 +1575,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 {
        int ret, tries = 10;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n");
+               return 0;
+       }
+
        /* don't reset streams that are not committed */
        if (!stream->commited)
                return 0;
index 3abc29e8a9287d133636d97fe74a96687c953bc0..2ac72eb5e75d82e1a6e8e11e4b176984692215d3 100644 (file)
@@ -343,7 +343,7 @@ int sst_acpi_remove(struct platform_device *pdev)
 }
 
 static struct sst_machines sst_acpi_bytcr[] = {
-       {"10EC5640", "T100", "bytt100_rt5640", NULL, "fw_sst_0f28.bin",
+       {"10EC5640", "T100", "bytt100_rt5640", NULL, "intel/fw_sst_0f28.bin",
                                                &byt_rvp_platform_data },
        {},
 };
index 8b79cafab1e2299af486c343565e68a162060c72..c7eb9dd67f608c47ffa93a97c1824e3dae3c867b 100644 (file)
@@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
        case SND_SOC_DAIFMT_CBM_CFS:
                /* McBSP slave. FS clock as output */
                regs->srgr2     |= FSGM;
-               regs->pcr0      |= FSXM;
+               regs->pcr0      |= FSXM | FSRM;
                break;
        case SND_SOC_DAIFMT_CBM_CFM:
                /* McBSP slave */
index 26ec5117b35c1ace8b5b089200dbd367b06bbbf0..dcc26eda0539b470f37382dacdc78622a8da0a95 100644 (file)
@@ -335,6 +335,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
                            SNDRV_PCM_FMTBIT_S24_LE),
        },
        .ops = &rockchip_i2s_dai_ops,
+       .symmetric_rates = 1,
 };
 
 static const struct snd_soc_component_driver rockchip_i2s_component = {
@@ -454,11 +455,11 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
 
        i2s->playback_dma_data.addr = res->start + I2S_TXDR;
        i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       i2s->playback_dma_data.maxburst = 16;
+       i2s->playback_dma_data.maxburst = 4;
 
        i2s->capture_dma_data.addr = res->start + I2S_RXDR;
        i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       i2s->capture_dma_data.maxburst = 16;
+       i2s->capture_dma_data.maxburst = 4;
 
        i2s->dev = &pdev->dev;
        dev_set_drvdata(&pdev->dev, i2s);
index 89a5d8bc6ee7c9a356a0a9f58e1b6e938c354dce..93f456f518a97dc9a048350093acae715852e10a 100644 (file)
 #define I2S_DMACR_TDE_DISABLE  (0 << I2S_DMACR_TDE_SHIFT)
 #define I2S_DMACR_TDE_ENABLE   (1 << I2S_DMACR_TDE_SHIFT)
 #define I2S_DMACR_TDL_SHIFT    0
-#define I2S_DMACR_TDL(x)       ((x - 1) << I2S_DMACR_TDL_SHIFT)
+#define I2S_DMACR_TDL(x)       ((x) << I2S_DMACR_TDL_SHIFT)
 #define I2S_DMACR_TDL_MASK     (0x1f << I2S_DMACR_TDL_SHIFT)
 
 /*
index 590a82f01d0bdc2cc2fa39b89c006a2ec8b2b589..025c38fbe3c03fea08db0b365e472902c5f110fc 100644 (file)
@@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
                        rtd->dai_link->stream_name);
 
                ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
-                               1, 0, &be_pcm);
+                               rtd->dai_link->dpcm_playback,
+                               rtd->dai_link->dpcm_capture, &be_pcm);
                if (ret < 0) {
                        dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
                                rtd->dai_link->name);
@@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 
                rtd->pcm = be_pcm;
                rtd->fe_compr = 1;
-               be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
-               be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+               if (rtd->dai_link->dpcm_playback)
+                       be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+               else if (rtd->dai_link->dpcm_capture)
+                       be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
                memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
        } else
                memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
index 985052b3fbed375dee764a64b63852998532d5e2..2c62620abca691af4033552518eb7138ea6485d5 100644 (file)
@@ -3230,7 +3230,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                                   const char *propname)
 {
        struct device_node *np = card->dev->of_node;
-       int num_routes, old_routes;
+       int num_routes;
        struct snd_soc_dapm_route *routes;
        int i, ret;
 
@@ -3248,9 +3248,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                return -EINVAL;
        }
 
-       old_routes = card->num_dapm_routes;
-       routes = devm_kzalloc(card->dev,
-                             (old_routes + num_routes) * sizeof(*routes),
+       routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes),
                              GFP_KERNEL);
        if (!routes) {
                dev_err(card->dev,
@@ -3258,11 +3256,9 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                return -EINVAL;
        }
 
-       memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes));
-
        for (i = 0; i < num_routes; i++) {
                ret = of_property_read_string_index(np, propname,
-                       2 * i, &routes[old_routes + i].sink);
+                       2 * i, &routes[i].sink);
                if (ret) {
                        dev_err(card->dev,
                                "ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3270,7 +3266,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                        return -EINVAL;
                }
                ret = of_property_read_string_index(np, propname,
-                       (2 * i) + 1, &routes[old_routes + i].source);
+                       (2 * i) + 1, &routes[i].source);
                if (ret) {
                        dev_err(card->dev,
                                "ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3279,7 +3275,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                }
        }
 
-       card->num_dapm_routes += num_routes;
+       card->num_dapm_routes = num_routes;
        card->dapm_routes = routes;
 
        return 0;
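
The property parsed above is a flat list of strings taken pairwise,
sink first and source second, which is why the indices are 2 * i and
2 * i + 1. A standalone sketch with hypothetical route names:

#include <stdio.h>

int main(void)
{
	/* hypothetical contents of a routing property */
	const char *prop[] = { "Headphone", "HP_OUT", "Speaker", "SPK_OUT" };
	int num_routes = (int)(sizeof(prop) / sizeof(prop[0])) / 2;

	for (int i = 0; i < num_routes; i++)
		printf("route %d: sink=%s source=%s\n",
		       i, prop[2 * i], prop[2 * i + 1]);
	return 0;
}
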
index 272844746135763faa6425aae3fb8dc3bfc9ec50..327f8642ca80e66de2d0c880034cbd530f046dd1 100644 (file)
@@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
                return -EINVAL;
        }
 
-       if (cdev->n_streams < 2) {
+       if (cdev->n_streams < 1) {
                dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams);
                return -EINVAL;
        }
index 41650d5b93b70e5a9a36abbb7ffae4579075d8ab..3e2ef61c627b831bfec65724cc7166db051f5099 100644 (file)
@@ -913,6 +913,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
        case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
        case USB_ID(0x046d, 0x0808):
        case USB_ID(0x046d, 0x0809):
+       case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
        case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
        case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
        case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
index 6eedba1f773227d5df93bae06d7f3a0fcc1bd648..653d1bad77de2331ba18771f99a01dddcee0175c 100644 (file)
@@ -22,6 +22,8 @@
 #error only <linux/bitops.h> can be included directly
 #endif
 
+#include <asm-generic/bitops/hweight.h>
+
 #include <asm-generic/bitops/atomic.h>
 
 #endif /* __TOOLS_ASM_GENERIC_BITOPS_H */
diff --git a/tools/include/asm-generic/bitops/arch_hweight.h b/tools/include/asm-generic/bitops/arch_hweight.h
new file mode 100644 (file)
index 0000000..318bb2b
--- /dev/null
@@ -0,0 +1 @@
+#include "../../../../include/asm-generic/bitops/arch_hweight.h"
diff --git a/tools/include/asm-generic/bitops/const_hweight.h b/tools/include/asm-generic/bitops/const_hweight.h
new file mode 100644 (file)
index 0000000..0afd644
--- /dev/null
@@ -0,0 +1 @@
+#include "../../../../include/asm-generic/bitops/const_hweight.h"
diff --git a/tools/include/asm-generic/bitops/hweight.h b/tools/include/asm-generic/bitops/hweight.h
new file mode 100644 (file)
index 0000000..290120c
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
+
+#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ */
index 26005a15e7e29d34332b88e68a49fe223953913b..5ad9ee1dd7f6aed579a5e631e309438a80bb472f 100644 (file)
@@ -1,9 +1,9 @@
 #ifndef _TOOLS_LINUX_BITOPS_H_
 #define _TOOLS_LINUX_BITOPS_H_
 
+#include <asm/types.h>
 #include <linux/kernel.h>
 #include <linux/compiler.h>
-#include <asm/hweight.h>
 
 #ifndef __WORDSIZE
 #define __WORDSIZE (__SIZEOF_LONG__ * 8)
 #define BITS_TO_U32(nr)                DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
 #define BITS_TO_BYTES(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE)
 
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
 /*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
index a74fba6d774353d33fac7f04b71abdd241e0218e..86ea2d7b88451c219dacad848e60564fb1a64ecf 100644 (file)
@@ -67,7 +67,7 @@ int debugfs_valid_mountpoint(const char *debugfs)
 
        if (statfs(debugfs, &st_fs) < 0)
                return -ENOENT;
-       else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
+       else if ((long)st_fs.f_type != (long)DEBUGFS_MAGIC)
                return -ENOENT;
 
        return 0;
index 65d9be3f988747ae300db30d3b62cf0e8213dd69..128ef6332a6bd89c0ddbeef283c4dfccbf5f8417 100644 (file)
@@ -79,7 +79,7 @@ static int fs__valid_mount(const char *fs, long magic)
 
        if (statfs(fs, &st_fs) < 0)
                return -ENOENT;
-       else if (st_fs.f_type != magic)
+       else if ((long)st_fs.f_type != magic)
                return -ENOENT;
 
        return 0;
index 6f803609e498246d277d35829b18c7924a136eca..0b0112c80f22b390875799909ab567b42c66009e 100644 (file)
@@ -317,7 +317,7 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
         *
         * TODO: Hook into free() and add that check there as well.
         */
-       debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex));
+       debug_check_no_locks_freed(mutex, sizeof(*mutex));
        __del_lock(__get_lock(mutex));
        return ll_pthread_mutex_destroy(mutex);
 }
@@ -341,7 +341,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
 {
        try_init_preload();
 
-       debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock));
+       debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
        __del_lock(__get_lock(rwlock));
        return ll_pthread_rwlock_destroy(rwlock);
 }
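
The old calls passed 'mutex + sizeof(*mutex)' where a length in bytes
was expected; besides handing a pointer to a length parameter, pointer
arithmetic scales by the element size, so the expression overshoots by
a factor of sizeof(*mutex). A standalone illustration of the scaling:

#include <stdio.h>

int main(void)
{
	int buf[64];
	int *p = buf;

	/* p + 1 advances one element, i.e. sizeof(int) bytes */
	printf("p + 1 advances %zu bytes\n",
	       (size_t)((char *)(p + 1) - (char *)p));
	/* p + sizeof(*p) advances sizeof(int) elements: 16 bytes here */
	printf("p + sizeof(*p) advances %zu bytes\n",
	       (size_t)((char *)(p + sizeof(*p)) - (char *)p));
	return 0;
}
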
index 83e2887f91a39200612290bd0b2b2fbd824e660a..fbbfdc39271dac69bd96aa932d6e62318dbce143 100644 (file)
@@ -6,12 +6,15 @@ tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/util/find_next_bit.c
 tools/include/asm/bug.h
+tools/include/asm-generic/bitops/arch_hweight.h
 tools/include/asm-generic/bitops/atomic.h
+tools/include/asm-generic/bitops/const_hweight.h
 tools/include/asm-generic/bitops/__ffs.h
 tools/include/asm-generic/bitops/__fls.h
 tools/include/asm-generic/bitops/find.h
 tools/include/asm-generic/bitops/fls64.h
 tools/include/asm-generic/bitops/fls.h
+tools/include/asm-generic/bitops/hweight.h
 tools/include/asm-generic/bitops.h
 tools/include/linux/bitops.h
 tools/include/linux/compiler.h
@@ -19,6 +22,8 @@ tools/include/linux/export.h
 tools/include/linux/hash.h
 tools/include/linux/log2.h
 tools/include/linux/types.h
+include/asm-generic/bitops/arch_hweight.h
+include/asm-generic/bitops/const_hweight.h
 include/asm-generic/bitops/fls64.h
 include/asm-generic/bitops/__fls.h
 include/asm-generic/bitops/fls.h
@@ -29,6 +34,7 @@ include/linux/list.h
 include/linux/hash.h
 include/linux/stringify.h
 lib/find_next_bit.c
+lib/hweight.c
 lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
index 67a03a825b3c94bb894f0f557dac3bb520943e65..aa6a50447c32b63fd3f55a5ada5059610d610ec1 100644 (file)
@@ -232,12 +232,15 @@ LIB_H += ../include/linux/hash.h
 LIB_H += ../../include/linux/stringify.h
 LIB_H += util/include/linux/bitmap.h
 LIB_H += ../include/linux/bitops.h
+LIB_H += ../include/asm-generic/bitops/arch_hweight.h
 LIB_H += ../include/asm-generic/bitops/atomic.h
+LIB_H += ../include/asm-generic/bitops/const_hweight.h
 LIB_H += ../include/asm-generic/bitops/find.h
 LIB_H += ../include/asm-generic/bitops/fls64.h
 LIB_H += ../include/asm-generic/bitops/fls.h
 LIB_H += ../include/asm-generic/bitops/__ffs.h
 LIB_H += ../include/asm-generic/bitops/__fls.h
+LIB_H += ../include/asm-generic/bitops/hweight.h
 LIB_H += ../include/asm-generic/bitops.h
 LIB_H += ../include/linux/compiler.h
 LIB_H += ../include/linux/log2.h
@@ -255,7 +258,6 @@ LIB_H += util/include/linux/linkage.h
 LIB_H += util/include/asm/asm-offsets.h
 LIB_H += ../include/asm/bug.h
 LIB_H += util/include/asm/byteorder.h
-LIB_H += util/include/asm/hweight.h
 LIB_H += util/include/asm/swab.h
 LIB_H += util/include/asm/system.h
 LIB_H += util/include/asm/uaccess.h
@@ -462,10 +464,12 @@ BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
 # Benchmark modules
 BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
 BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
-ifeq ($(RAW_ARCH),x86_64)
+ifeq ($(ARCH), x86)
+ifeq ($(IS_64_BIT), 1)
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
 endif
+endif
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
 BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
 BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
@@ -743,6 +747,9 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS
 $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
+$(OUTPUT)util/hweight.o: ../../lib/hweight.c $(OUTPUT)PERF-CFLAGS
+       $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
 $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
index 3bb50eac5542fc62c4482c92f4478806e731a9f7..0c370f81e00280c6428ddfe0975584edcb9d02a7 100644 (file)
@@ -103,7 +103,7 @@ static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc)
                return NULL;
        }
 
-       result = dwarf_cfi_addrframe(cfi, pc, &frame);
+       result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
        if (result) {
                pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
                return NULL;
@@ -128,7 +128,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
                return NULL;
        }
 
-       result = dwarf_cfi_addrframe(cfi, pc, &frame);
+       result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
        if (result) {
                pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
                return NULL;
@@ -145,7 +145,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
  *             yet used)
  *     -1 in case of errors
  */
-static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
+static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc)
 {
        int             rc = -1;
        Dwfl            *dwfl;
@@ -155,6 +155,7 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
        Dwarf_Addr      start = pc;
        Dwarf_Addr      end = pc;
        bool            signalp;
+       const char      *exec_file = dso->long_name;
 
        dwfl = dso->dwfl;
 
@@ -165,8 +166,10 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
                        return -1;
                }
 
-               if (dwfl_report_offline(dwfl, "", dso->long_name, -1) == NULL) {
-                       pr_debug("dwfl_report_offline() failed %s\n",
+               mod = dwfl_report_elf(dwfl, exec_file, exec_file, -1,
+                                               map_start, false);
+               if (!mod) {
+                       pr_debug("dwfl_report_elf() failed %s\n",
                                                dwarf_errmsg(-1));
                        /*
                         * We normally cache the DWARF debug info and never
@@ -256,10 +259,10 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
                return skip_slot;
        }
 
-       rc = check_return_addr(dso, ip);
+       rc = check_return_addr(dso, al.map->start, ip);
 
-       pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n",
-                               dso->long_name, chain->nr, ip, rc);
+       pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n",
+                               dso->long_name, al.sym->name, ip, rc);
 
        if (rc == 0) {
                /*
index 07a8d7646a1549c61699f7311285238ef5ef93cf..005cc283790cfb7cf4db014362cb5d56f2a89ca2 100644 (file)
 #include <stdlib.h>
 #include <signal.h>
 #include <sys/wait.h>
-#include <linux/unistd.h>
 #include <string.h>
 #include <errno.h>
 #include <assert.h>
 #include <sys/time.h>
 #include <sys/types.h>
+#include <sys/syscall.h>
 
 #include <pthread.h>
 
index e7417fe97a9775eae8712d5bb58be2db0d1d7363..747f86103599826b6555563d25c8be25ae8f3d36 100644 (file)
@@ -232,7 +232,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
                if (nr_samples > 0) {
                        total_nr_samples += nr_samples;
                        hists__collapse_resort(hists, NULL);
-                       hists__output_resort(hists);
+                       hists__output_resort(hists, NULL);
 
                        if (symbol_conf.event_group &&
                            !perf_evsel__is_group_leader(pos))
index 1ce425d101a99691121b62a2a0c879c4f362fdf5..1fd96c13f1998a4048cbc5ab3eef1df35a5f80b8 100644 (file)
@@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
        return __hist_entry__cmp_compute(p_left, p_right, c);
 }
 
+static int64_t
+hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+                   struct hist_entry *right __maybe_unused)
+{
+       return 0;
+}
+
+static int64_t
+hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+{
+       if (sort_compute)
+               return 0;
+
+       if (left->stat.period == right->stat.period)
+               return 0;
+       return left->stat.period > right->stat.period ? 1 : -1;
+}
+
+static int64_t
+hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+}
+
+static int64_t
+hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+}
+
+static int64_t
+hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+}
+
 static void insert_hist_entry_by_compute(struct rb_root *root,
                                         struct hist_entry *he,
                                         int c)
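
The comparators added above pass (right, left) into the ascending
compute comparison to get a descending sort order. The same
operand-swap idiom in isolation, with qsort() (a standalone sketch):

#include <stdio.h>
#include <stdlib.h>

static int cmp_asc(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	return (x > y) - (x < y);
}

/* descending: delegate to the ascending comparator with operands swapped */
static int cmp_desc(const void *a, const void *b)
{
	return cmp_asc(b, a);
}

int main(void)
{
	int v[] = { 3, 1, 2 };

	qsort(v, 3, sizeof(v[0]), cmp_desc);
	printf("%d %d %d\n", v[0], v[1], v[2]);	/* 3 2 1 */
	return 0;
}
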
@@ -605,7 +641,7 @@ static void hists__process(struct hists *hists)
                hists__precompute(hists);
                hists__compute_resort(hists);
        } else {
-               hists__output_resort(hists);
+               hists__output_resort(hists, NULL);
        }
 
        hists__fprintf(hists, true, 0, 0, 0, stdout);
@@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx)
        fmt->header = hpp__header;
        fmt->width  = hpp__width;
        fmt->entry  = hpp__entry_global;
+       fmt->cmp    = hist_entry__cmp_nop;
+       fmt->collapse = hist_entry__cmp_nop;
 
        /* TODO more colors */
        switch (idx) {
        case PERF_HPP_DIFF__BASELINE:
                fmt->color = hpp__color_baseline;
+               fmt->sort  = hist_entry__cmp_baseline;
                break;
        case PERF_HPP_DIFF__DELTA:
                fmt->color = hpp__color_delta;
+               fmt->sort  = hist_entry__cmp_delta;
                break;
        case PERF_HPP_DIFF__RATIO:
                fmt->color = hpp__color_ratio;
+               fmt->sort  = hist_entry__cmp_ratio;
                break;
        case PERF_HPP_DIFF__WEIGHTED_DIFF:
                fmt->color = hpp__color_wdiff;
+               fmt->sort  = hist_entry__cmp_wdiff;
                break;
        default:
+               fmt->sort  = hist_entry__cmp_nop;
                break;
        }
 
        init_header(d, dfmt);
        perf_hpp__column_register(fmt);
+       perf_hpp__register_sort_field(fmt);
 }
 
 static void ui_init(void)
index 011195e38f2173947550100e62927e908b429d30..198f3c3aff952358766626f5bfea9ce81a996b28 100644 (file)
@@ -19,7 +19,9 @@
 int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        int i;
-       const struct option list_options[] = {
+       bool raw_dump = false;
+       struct option list_options[] = {
+               OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
                OPT_END()
        };
        const char * const list_usage[] = {
@@ -27,11 +29,18 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                NULL
        };
 
+       set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);
+
        argc = parse_options(argc, argv, list_options, list_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
 
        setup_pager();
 
+       if (raw_dump) {
+               print_events(NULL, true);
+               return 0;
+       }
+
        if (argc == 0) {
                print_events(NULL, false);
                return 0;
@@ -53,8 +62,6 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                        print_hwcache_events(NULL, false);
                else if (strcmp(argv[i], "pmu") == 0)
                        print_pmu_events(NULL, false);
-               else if (strcmp(argv[i], "--raw-dump") == 0)
-                       print_events(NULL, true);
                else {
                        char *sep = strchr(argv[i], ':'), *s;
                        int sep_idx;
index 39367609c707bc0332d4fdbd4a05cf49f4cb78d5..072ae8ad67fc1d258354b621a3ae7b2833deba0c 100644 (file)
@@ -457,6 +457,19 @@ static void report__collapse_hists(struct report *rep)
        ui_progress__finish();
 }
 
+static void report__output_resort(struct report *rep)
+{
+       struct ui_progress prog;
+       struct perf_evsel *pos;
+
+       ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
+
+       evlist__for_each(rep->session->evlist, pos)
+               hists__output_resort(evsel__hists(pos), &prog);
+
+       ui_progress__finish();
+}
+
 static int __cmd_report(struct report *rep)
 {
        int ret;
@@ -505,13 +518,20 @@ static int __cmd_report(struct report *rep)
        if (session_done())
                return 0;
 
+       /*
+        * recalculate the number of entries after collapsing, since it
+        * may have changed during the collapse phase.
+        */
+       rep->nr_entries = 0;
+       evlist__for_each(session->evlist, pos)
+               rep->nr_entries += evsel__hists(pos)->nr_entries;
+
        if (rep->nr_entries == 0) {
                ui__error("The %s file has no samples!\n", file->path);
                return 0;
        }
 
-       evlist__for_each(session->evlist, pos)
-               hists__output_resort(evsel__hists(pos));
+       report__output_resort(rep);
 
        return report__browse_hists(rep);
 }
index 0aa7747ff1390e0995a875a6c185697901cb9632..616f0fcb47010abf68ac7e4a9e256fa559af4499 100644 (file)
@@ -66,7 +66,6 @@
 #include <sys/utsname.h>
 #include <sys/mman.h>
 
-#include <linux/unistd.h>
 #include <linux/types.h>
 
 static volatile int done;
@@ -285,7 +284,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
        }
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        hists__output_recalc_col_len(hists, top->print_entries - printed);
        putchar('\n');
@@ -554,7 +553,7 @@ static void perf_top__sort_new_samples(void *arg)
        }
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 }
 
 static void *display_thread_tui(void *arg)
index 5d4b039fe1edc6ebf6f6dafec597e125383d0c4f..648e31ff4021c2e11520ab8b00e6f89213d324a9 100644 (file)
@@ -20,7 +20,7 @@ NO_PERF_REGS := 1
 
 # Additional ARCH settings for x86
 ifeq ($(ARCH),x86)
-  ifeq (${IS_X86_64}, 1)
+  ifeq (${IS_64_BIT}, 1)
     CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
     ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
     LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
index 851cd0172a7694a0e21fd18d07c79031d5f9b253..ff95a68741d1ccdb54e54d929f2292e88963a5d1 100644 (file)
@@ -1,7 +1,7 @@
 
 uname_M := $(shell uname -m 2>/dev/null || echo not)
 
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
+RAW_ARCH := $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
                                   -e s/arm.*/arm/ -e s/sa110/arm/ \
                                   -e s/s390x/s390/ -e s/parisc64/parisc/ \
                                   -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
@@ -9,23 +9,23 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
                                   -e s/tile.*/tile/ )
 
 # Additional ARCH settings for x86
-ifeq ($(ARCH),i386)
-  override ARCH := x86
+ifeq ($(RAW_ARCH),i386)
+  ARCH ?= x86
 endif
 
-ifeq ($(ARCH),x86_64)
-  override ARCH := x86
-  IS_X86_64 := 0
-  ifeq (, $(findstring m32,$(CFLAGS)))
-    IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
-    RAW_ARCH := x86_64
+ifeq ($(RAW_ARCH),x86_64)
+  ARCH ?= x86
+
+  ifneq (, $(findstring m32,$(CFLAGS)))
+    RAW_ARCH := x86_32
   endif
 endif
 
-ifeq (${IS_X86_64}, 1)
+ARCH ?= $(RAW_ARCH)
+
+LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+ifeq ($(LP64), 1)
   IS_64_BIT := 1
-else ifeq ($(ARCH),x86)
-  IS_64_BIT := 0
 else
-  IS_64_BIT := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+  IS_64_BIT := 0
 endif
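
The probe above pipes the bare token __LP64__ through the C
preprocessor; on an LP64 ABI the compiler predefines it as 1, so the
last line of output is 1. The same predefine checked from C (a
standalone sketch):

#include <stdio.h>

int main(void)
{
#ifdef __LP64__
	/* LP64 ABI: long and pointers are 64-bit */
	printf("__LP64__ set, sizeof(long) = %zu\n", sizeof(long));
#else
	printf("__LP64__ unset, sizeof(long) = %zu\n", sizeof(long));
#endif
	return 0;
}
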
index a3b13d7dc1d43f3caf301b4d9f941c60d88ed37c..6ef68165c9db628d23bbe85b48945ac2581ec979 100644 (file)
@@ -6,7 +6,6 @@
 #include <sys/syscall.h>
 #include <linux/types.h>
 #include <linux/perf_event.h>
-#include <asm/unistd.h>
 
 #if defined(__i386__)
 #define mb()           asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
index 790ceba6ad3f4a4102a1affa81a637ef774d7d43..28431d1bbcf5768de83f93165f8bf9599ad0f087 100644 (file)
@@ -5,7 +5,10 @@
  *     ANY CHANGES MADE HERE WILL BE LOST! 
  *
  */
-
+#include <stdbool.h>
+#ifndef HAS_BOOL
+# define HAS_BOOL 1
+#endif
 #line 1 "Context.xs"
 /*
  * Context.xs.  XS interfaces for perf script.
index ab28cca2cb97ad436dd7c419ee4ee7fa37ecb0b9..0bf06bec68c7e9786668990ad399b578326726c5 100644 (file)
@@ -11,6 +11,9 @@
 #include "thread.h"
 #include "callchain.h"
 
+/* For bsearch(). We try to unwind functions in a shared object. */
+#include <stdlib.h>
+
 static int mmap_handler(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample __maybe_unused,
@@ -28,7 +31,7 @@ static int init_live_machine(struct machine *machine)
                                                  mmap_handler, machine, true);
 }
 
-#define MAX_STACK 6
+#define MAX_STACK 8
 
 static int unwind_entry(struct unwind_entry *entry, void *arg)
 {
@@ -37,6 +40,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
        static const char *funcs[MAX_STACK] = {
                "test__arch_unwind_sample",
                "unwind_thread",
+               "compare",
+               "bsearch",
                "krava_3",
                "krava_2",
                "krava_1",
@@ -88,10 +93,37 @@ static int unwind_thread(struct thread *thread)
        return err;
 }
 
+static int global_unwind_retval = -INT_MAX;
+
+__attribute__ ((noinline))
+static int compare(void *p1, void *p2)
+{
+       /* Any possible value should be 'thread' */
+       struct thread *thread = *(struct thread **)p1;
+
+       if (global_unwind_retval == -INT_MAX)
+               global_unwind_retval = unwind_thread(thread);
+
+       return p1 - p2;
+}
+
 __attribute__ ((noinline))
 static int krava_3(struct thread *thread)
 {
-       return unwind_thread(thread);
+       struct thread *array[2] = {thread, thread};
+       void *fp = &bsearch;
+       /*
+        * make _bsearch a volatile function pointer to
+        * prevent a potential optimization that would expand
+        * bsearch inline and call compare directly from this
+        * function instead of from the libc shared object.
+        */
+       void *(*volatile _bsearch)(void *, void *, size_t,
+                       size_t, int (*)(void *, void *));
+
+       _bsearch = fp;
+       _bsearch(array, &thread, 2, sizeof(struct thread **), compare);
+       return global_unwind_retval;
 }
 
 __attribute__ ((noinline))
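
The volatile function pointer above keeps the compiler from proving the
call target, so bsearch() is reached through a genuine indirect call
into libc instead of being inlined at the call site. The trick in
isolation (a standalone sketch):

#include <stdio.h>

static int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	/* volatile: the pointer must be reloaded at the call site, so the
	 * optimizer cannot fold the target and inline the call away */
	int (*volatile fp)(int) = add_one;

	printf("%d\n", fp(41));	/* stays a real indirect call */
	return 0;
}
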
index 614d5c4978ab6509559eff9f275dc594310020f2..8d110dec393ee1a42f78cb1b440ea9d19f825e1c 100644 (file)
@@ -187,7 +187,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
         * function since TEST_ASSERT_VAL() returns in case of failure.
         */
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("use callchain: %d, cumulate callchain: %d\n",
@@ -454,12 +454,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
         *   30.00%    10.00%     perf  perf           [.] cmd_record
         *   20.00%     0.00%     bash  libc           [.] malloc
         *   10.00%    10.00%     bash  [kernel]       [k] page_fault
-        *   10.00%    10.00%     perf  [kernel]       [k] schedule
-        *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
+        *   10.00%    10.00%     bash  bash           [.] xmalloc
         *   10.00%    10.00%     perf  [kernel]       [k] page_fault
-        *   10.00%    10.00%     perf  libc           [.] free
         *   10.00%    10.00%     perf  libc           [.] malloc
-        *   10.00%    10.00%     bash  bash           [.] xmalloc
+        *   10.00%    10.00%     perf  [kernel]       [k] schedule
+        *   10.00%    10.00%     perf  libc           [.] free
+        *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
         */
        struct result expected[] = {
                { 7000, 2000, "perf", "perf",     "main" },
@@ -468,12 +468,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
                { 3000, 1000, "perf", "perf",     "cmd_record" },
                { 2000,    0, "bash", "libc",     "malloc" },
                { 1000, 1000, "bash", "[kernel]", "page_fault" },
-               { 1000, 1000, "perf", "[kernel]", "schedule" },
-               { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
+               { 1000, 1000, "bash", "bash",     "xmalloc" },
                { 1000, 1000, "perf", "[kernel]", "page_fault" },
+               { 1000, 1000, "perf", "[kernel]", "schedule" },
                { 1000, 1000, "perf", "libc",     "free" },
                { 1000, 1000, "perf", "libc",     "malloc" },
-               { 1000, 1000, "bash", "bash",     "xmalloc" },
+               { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
        };
 
        symbol_conf.use_callchain = false;
@@ -537,10 +537,13 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
         *                  malloc
         *                  main
         *
-        *   10.00%    10.00%     perf  [kernel]       [k] schedule
+        *   10.00%    10.00%     bash  bash           [.] xmalloc
         *              |
-        *              --- schedule
-        *                  run_command
+        *              --- xmalloc
+        *                  malloc
+        *                  xmalloc     <--- NOTE: there's a cycle
+        *                  malloc
+        *                  xmalloc
         *                  main
         *
         *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
@@ -556,6 +559,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
         *                  run_command
         *                  main
         *
+        *   10.00%    10.00%     perf  [kernel]       [k] schedule
+        *              |
+        *              --- schedule
+        *                  run_command
+        *                  main
+        *
         *   10.00%    10.00%     perf  libc           [.] free
         *              |
         *              --- free
@@ -570,15 +579,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
         *                  run_command
         *                  main
         *
-        *   10.00%    10.00%     bash  bash           [.] xmalloc
-        *              |
-        *              --- xmalloc
-        *                  malloc
-        *                  xmalloc     <--- NOTE: there's a cycle
-        *                  malloc
-        *                  xmalloc
-        *                  main
-        *
         */
        struct result expected[] = {
                { 7000, 2000, "perf", "perf",     "main" },
@@ -587,12 +587,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                { 3000, 1000, "perf", "perf",     "cmd_record" },
                { 2000,    0, "bash", "libc",     "malloc" },
                { 1000, 1000, "bash", "[kernel]", "page_fault" },
-               { 1000, 1000, "perf", "[kernel]", "schedule" },
+               { 1000, 1000, "bash", "bash",     "xmalloc" },
                { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
                { 1000, 1000, "perf", "[kernel]", "page_fault" },
+               { 1000, 1000, "perf", "[kernel]", "schedule" },
                { 1000, 1000, "perf", "libc",     "free" },
                { 1000, 1000, "perf", "libc",     "malloc" },
-               { 1000, 1000, "bash", "bash",     "xmalloc" },
        };
        struct callchain_result expected_callchain[] = {
                {
@@ -622,9 +622,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                                { "bash",     "main" }, },
                },
                {
-                       3, {    { "[kernel]", "schedule" },
-                               { "perf",     "run_command" },
-                               { "perf",     "main" }, },
+                       6, {    { "bash",     "xmalloc" },
+                               { "libc",     "malloc" },
+                               { "bash",     "xmalloc" },
+                               { "libc",     "malloc" },
+                               { "bash",     "xmalloc" },
+                               { "bash",     "main" }, },
                },
                {
                        3, {    { "[kernel]", "sys_perf_event_open" },
@@ -637,6 +640,11 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                                { "perf",     "run_command" },
                                { "perf",     "main" }, },
                },
+               {
+                       3, {    { "[kernel]", "schedule" },
+                               { "perf",     "run_command" },
+                               { "perf",     "main" }, },
+               },
                {
                        4, {    { "libc",     "free" },
                                { "perf",     "cmd_record" },
@@ -649,14 +657,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                                { "perf",     "run_command" },
                                { "perf",     "main" }, },
                },
-               {
-                       6, {    { "bash",     "xmalloc" },
-                               { "libc",     "malloc" },
-                               { "bash",     "xmalloc" },
-                               { "libc",     "malloc" },
-                               { "bash",     "xmalloc" },
-                               { "bash",     "main" }, },
-               },
        };
 
        symbol_conf.use_callchain = true;
index 74f257a812653177f9d334d7a25df8359d8ef3d4..59e53db7914c0ad6100ab2e616cdf21e39efea46 100644 (file)
@@ -138,7 +138,7 @@ int test__hists_filter(void)
                struct hists *hists = evsel__hists(evsel);
 
                hists__collapse_resort(hists, NULL);
-               hists__output_resort(hists);
+               hists__output_resort(hists, NULL);
 
                if (verbose > 2) {
                        pr_info("Normal histogram\n");
index a748f2be1222e3d44791eebacaf8c53174a617bf..f5547610da0200b70c0bdc1a006adaee925eba73 100644 (file)
@@ -152,7 +152,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -252,7 +252,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -306,7 +306,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -384,7 +384,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -487,7 +487,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
index e6bb04b5b09b863013e4d361120269d59f6207c6..788506eef5671da5e64016063569b79d4e060d97 100644 (file)
@@ -550,7 +550,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
        bool need_percent;
 
        node = rb_first(root);
-       need_percent = !!rb_next(node);
+       need_percent = node && rb_next(node);
 
        while (node) {
                struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
index dc0d095f318c7da2868352d5a4c048a5dde40251..482adae3cc44a50889bb2278b323a3b6871197c6 100644 (file)
@@ -204,6 +204,9 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
                if (ret)
                        return ret;
 
+               if (a->thread != b->thread || !symbol_conf.use_callchain)
+                       return 0;
+
                ret = b->callchain->max_depth - a->callchain->max_depth;
        }
        return ret;
index 2f612562978cdc13c7e89b6dbddd24c9f928d626..3c38f25b1695cdd289808d0d9f5ea858f06fc5db 100644 (file)
@@ -1,5 +1,8 @@
 #include <signal.h>
 #include <stdbool.h>
+#ifdef HAVE_BACKTRACE_SUPPORT
+#include <execinfo.h>
+#endif
 
 #include "../../util/cache.h"
 #include "../../util/debug.h"
@@ -88,6 +91,25 @@ int ui__getch(int delay_secs)
        return SLkp_getkey();
 }
 
+#ifdef HAVE_BACKTRACE_SUPPORT
+static void ui__signal_backtrace(int sig)
+{
+       void *stackdump[32];
+       size_t size;
+
+       ui__exit(false);
+       psignal(sig, "perf");
+
+       printf("-------- backtrace --------\n");
+       size = backtrace(stackdump, ARRAY_SIZE(stackdump));
+       backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
+
+       exit(0);
+}
+#else
+# define ui__signal_backtrace  ui__signal
+#endif
+
 static void ui__signal(int sig)
 {
        ui__exit(false);
@@ -122,8 +144,8 @@ int ui__init(void)
        ui_browser__init();
        tui_progress__init();
 
-       signal(SIGSEGV, ui__signal);
-       signal(SIGFPE, ui__signal);
+       signal(SIGSEGV, ui__signal_backtrace);
+       signal(SIGFPE, ui__signal_backtrace);
        signal(SIGINT, ui__signal);
        signal(SIGQUIT, ui__signal);
        signal(SIGTERM, ui__signal);
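
The hunk above makes SIGSEGV and SIGFPE dump a backtrace before tearing down the TUI. A minimal standalone sketch of the same <execinfo.h> pattern, assuming glibc (the handler name and exit code are illustrative):

#include <execinfo.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

/* Print a symbolized backtrace on a fatal signal, mirroring
 * ui__signal_backtrace() above. */
static void crash_handler(int sig)
{
	void *stackdump[32];
	int size;

	psignal(sig, "demo");
	size = backtrace(stackdump, 32);
	/* backtrace_symbols_fd() writes straight to the fd, avoiding
	 * malloc() inside a possibly corrupted process. */
	backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
	_exit(1);
}

int main(void)
{
	signal(SIGSEGV, crash_handler);
	raise(SIGSEGV);	/* trigger the handler for demonstration */
	return 0;
}
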
index 79999ceaf2be08e5f4880e853d467bf59874340c..01bc4e23a2cf58f1724c462706fee0e70234d2e0 100644 (file)
@@ -177,14 +177,17 @@ static int lock__parse(struct ins_operands *ops)
                goto out_free_ops;
 
        ops->locked.ins = ins__find(name);
+       free(name);
+
        if (ops->locked.ins == NULL)
                goto out_free_ops;
 
        if (!ops->locked.ins->ops)
                return 0;
 
-       if (ops->locked.ins->ops->parse)
-               ops->locked.ins->ops->parse(ops->locked.ops);
+       if (ops->locked.ins->ops->parse &&
+           ops->locked.ins->ops->parse(ops->locked.ops) < 0)
+               goto out_free_ops;
 
        return 0;
 
@@ -208,6 +211,13 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
 
 static void lock__delete(struct ins_operands *ops)
 {
+       struct ins *ins = ops->locked.ins;
+
+       if (ins && ins->ops->free)
+               ins->ops->free(ops->locked.ops);
+       else
+               ins__delete(ops->locked.ops);
+
        zfree(&ops->locked.ops);
        zfree(&ops->target.raw);
        zfree(&ops->target.name);
@@ -531,8 +541,8 @@ static void disasm_line__init_ins(struct disasm_line *dl)
        if (!dl->ins->ops)
                return;
 
-       if (dl->ins->ops->parse)
-               dl->ins->ops->parse(&dl->ops);
+       if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0)
+               dl->ins = NULL;
 }
 
 static int disasm_line__parse(char *line, char **namep, char **rawp)
index 0784a9420528603efa9450a8c72c78ebf271298a..cadbdc90a5cbf319385cb67aa5a88c06cdf107dc 100644 (file)
@@ -116,11 +116,6 @@ struct annotation {
        struct annotated_source *src;
 };
 
-struct sannotation {
-       struct annotation annotation;
-       struct symbol     symbol;
-};
-
 static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
 {
        return (((void *)&notes->src->histograms) +
@@ -129,8 +124,7 @@ static inline struct sym_hist *annotation__histogram(struct annotation *notes, i
 
 static inline struct annotation *symbol__annotation(struct symbol *sym)
 {
-       struct sannotation *a = container_of(sym, struct sannotation, symbol);
-       return &a->annotation;
+       return (void *)sym - symbol_conf.priv_size;
 }
 
 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
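
The new symbol__annotation() depends on the allocator placing a symbol's private data immediately before the struct symbol itself, symbol_conf.priv_size bytes back. A generic sketch of that layout under illustrative names (not perf's actual allocator):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct object { char name[32]; };	/* stands in for struct symbol     */
struct priv   { int refcount; };	/* stands in for struct annotation */

static size_t priv_size = sizeof(struct priv);	/* ~symbol_conf.priv_size */

static struct object *object__new(const char *name)
{
	/* Single allocation laid out as [priv][object]; callers only
	 * ever see the object pointer. */
	void *mem = calloc(1, priv_size + sizeof(struct object));
	struct object *obj;

	if (!mem)
		return NULL;
	obj = (struct object *)((char *)mem + priv_size);
	strncpy(obj->name, name, sizeof(obj->name) - 1);
	return obj;
}

static struct priv *object__priv(struct object *obj)
{
	/* Mirror of symbol__annotation(): step back over the private area. */
	return (struct priv *)((char *)obj - priv_size);
}

int main(void)
{
	struct object *obj = object__new("demo");

	if (!obj)
		return 1;
	object__priv(obj)->refcount = 1;
	printf("%s refcount=%d\n", obj->name, object__priv(obj)->refcount);
	free((char *)obj - priv_size);	/* free from the real allocation start */
	return 0;
}
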
index 5cf9e1b5989de40cb677b1bd744d684cd3bfc871..d04d770d90f6e29bc17d4d5d1299e76278b5ee1c 100644 (file)
@@ -71,7 +71,9 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
 extern char *perf_pathdup(const char *fmt, ...)
        __attribute__((format (printf, 1, 2)));
 
+#ifndef __UCLIBC__
 /* Matches the libc/libbsd function attribute; uClibc ships its own strlcpy, so skip the declaration there: */
 extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif
 
 #endif /* __PERF_CACHE_H */
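
For context on the guarded declaration: strlcpy() copies at most size - 1 bytes, NUL-terminates whenever size is non-zero, and returns strlen(src) so callers can detect truncation. A minimal sketch of those semantics (my_strlcpy is an illustrative name, not perf's implementation):

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dest, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dest, src, n);
		dest[n] = '\0';	/* always NUL-terminate when size > 0 */
	}
	return len;	/* a return value >= size means truncation */
}

int main(void)
{
	char buf[8];

	if (my_strlcpy(buf, "truncated string", sizeof(buf)) >= sizeof(buf))
		printf("truncated: \"%s\"\n", buf);
	return 0;
}
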
index 64b377e591e457746138173cfa59533f887e3d56..14e7a123d43b3f4ab4e04a5aba7448bd5d1106cd 100644 (file)
@@ -841,3 +841,33 @@ char *callchain_list__sym_name(struct callchain_list *cl,
 
        return bf;
 }
+
+static void free_callchain_node(struct callchain_node *node)
+{
+       struct callchain_list *list, *tmp;
+       struct callchain_node *child;
+       struct rb_node *n;
+
+       list_for_each_entry_safe(list, tmp, &node->val, list) {
+               list_del(&list->list);
+               free(list);
+       }
+
+       n = rb_first(&node->rb_root_in);
+       while (n) {
+               child = container_of(n, struct callchain_node, rb_node_in);
+               n = rb_next(n);
+               rb_erase(&child->rb_node_in, &node->rb_root_in);
+
+               free_callchain_node(child);
+               free(child);
+       }
+}
+
+void free_callchain(struct callchain_root *root)
+{
+       if (!symbol_conf.use_callchain)
+               return;
+
+       free_callchain_node(&root->node);
+}
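
free_callchain_node() above uses the standard post-order teardown: fetch the next node before erasing the current one, recurse into the children, then free. The same idea on a plain binary tree, as an illustrative sketch (the kernel rbtree API is not reproduced here):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *left, *right;
};

static void free_tree(struct node *n)
{
	if (!n)
		return;
	/* Children first, then the node itself, so no pointer is read
	 * after its memory has been freed. */
	free_tree(n->left);
	free_tree(n->right);
	free(n);
}

static struct node *node_new(int value, struct node *l, struct node *r)
{
	struct node *n = malloc(sizeof(*n));

	if (n) {
		n->value = value;
		n->left = l;
		n->right = r;
	}
	return n;
}

int main(void)
{
	struct node *root = node_new(2, node_new(1, NULL, NULL),
					node_new(3, NULL, NULL));

	free_tree(root);
	printf("tree freed\n");
	return 0;
}
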
index dbc08cf5f970a2f25e9451ca5e259a38f5cdbfe1..c0ec1acc38e404aa599b5b6635d004ac2f0e204f 100644 (file)
@@ -198,4 +198,6 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
 char *callchain_list__sym_name(struct callchain_list *cl,
                               char *bf, size_t bfsize, bool show_dso);
 
+void free_callchain(struct callchain_root *root);
+
 #endif /* __PERF_CALLCHAIN_H */
index cbab1fb77b1d6c4efb8565144256e6f1c5a6d540..2e507b5025a3ed6e3f3df1e385aa3c8f13637389 100644 (file)
@@ -1445,7 +1445,7 @@ int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
        case ENOENT:
                scnprintf(buf, size, "%s",
                          "Error:\tUnable to find debugfs\n"
-                         "Hint:\tWas your kernel was compiled with debugfs support?\n"
+                         "Hint:\tWas your kernel compiled with debugfs support?\n"
                          "Hint:\tIs the debugfs filesystem mounted?\n"
                          "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
                break;
index 6e88b9e395df67abb0458eea80878112acab0b0a..182395546ddca63d919886f4b49896fbdd46e3e2 100644 (file)
@@ -6,6 +6,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "annotate.h"
+#include "ui/progress.h"
 #include <math.h>
 
 static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -303,7 +304,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
        size_t callchain_size = 0;
        struct hist_entry *he;
 
-       if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
+       if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);
 
        he = zalloc(sizeof(*he) + callchain_size);
@@ -736,7 +737,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
        iter->he = he;
        he_cache[iter->curr++] = he;
 
-       callchain_append(he->callchain, &callchain_cursor, sample->period);
+       hist_entry__append_callchain(he, sample);
 
        /*
         * We need to re-initialize the cursor since callchain_append()
@@ -809,7 +810,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
        iter->he = he;
        he_cache[iter->curr++] = he;
 
-       callchain_append(he->callchain, &cursor, sample->period);
+       if (symbol_conf.use_callchain)
+               callchain_append(he->callchain, &cursor, sample->period);
        return 0;
 }
 
@@ -945,6 +947,7 @@ void hist_entry__free(struct hist_entry *he)
        zfree(&he->mem_info);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
+       free_callchain(he->callchain);
        free(he);
 }
 
@@ -987,6 +990,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                else
                        p = &(*p)->rb_right;
        }
+       hists->nr_entries++;
 
        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
@@ -1024,7 +1028,10 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
        if (!sort__need_collapse)
                return;
 
+       hists->nr_entries = 0;
+
        root = hists__get_rotate_entries_in(hists);
+
        next = rb_first(root);
 
        while (next) {
@@ -1119,7 +1126,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
        rb_insert_color(&he->rb_node, entries);
 }
 
-void hists__output_resort(struct hists *hists)
+void hists__output_resort(struct hists *hists, struct ui_progress *prog)
 {
        struct rb_root *root;
        struct rb_node *next;
@@ -1148,6 +1155,9 @@ void hists__output_resort(struct hists *hists)
 
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
+
+               if (prog)
+                       ui_progress__update(prog, 1);
        }
 }
 
index d0ef9a19a7445caaf7bdc1d21b42ea2d1087a2a9..46bd50344f853f8f55f43bc23cd95f8459e53cab 100644 (file)
@@ -121,7 +121,7 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
                              struct hists *hists);
 void hist_entry__free(struct hist_entry *);
 
-void hists__output_resort(struct hists *hists);
+void hists__output_resort(struct hists *hists, struct ui_progress *prog);
 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
 
 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c
deleted file mode 100644 (file)
index 5c1d0d0..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#include <linux/bitops.h>
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-unsigned int hweight32(unsigned int w)
-{
-       unsigned int res = w - ((w >> 1) & 0x55555555);
-       res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
-       res = (res + (res >> 4)) & 0x0F0F0F0F;
-       res = res + (res >> 8);
-       return (res + (res >> 16)) & 0x000000FF;
-}
-
-unsigned long hweight64(__u64 w)
-{
-#if BITS_PER_LONG == 32
-       return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
-#elif BITS_PER_LONG == 64
-       __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
-       res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
-       res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
-       res = res + (res >> 8);
-       res = res + (res >> 16);
-       return (res + (res >> 32)) & 0x00000000000000FFul;
-#endif
-}
diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h
deleted file mode 100644 (file)
index 36cf26d..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef PERF_HWEIGHT_H
-#define PERF_HWEIGHT_H
-
-#include <linux/types.h>
-unsigned int hweight32(unsigned int w);
-unsigned long hweight64(__u64 w);
-
-#endif /* PERF_HWEIGHT_H */
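
The deleted hweight32() is the classic SWAR population count (the copy perf now links instead is ../../lib/hweight.c, per the python-ext-sources hunk further down): pairwise 2-bit sums, then 4-bit and 8-bit sums, then byte folding. A quick sketch checking it against GCC's builtin:

#include <assert.h>
#include <stdio.h>

/* SWAR popcount as removed above, verified against __builtin_popcount. */
static unsigned int hweight32(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);		/* 2-bit sums */
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);	/* 4-bit sums */
	res = (res + (res >> 4)) & 0x0F0F0F0F;			/* 8-bit sums */
	res = res + (res >> 8);					/* fold bytes */
	return (res + (res >> 16)) & 0x000000FF;
}

int main(void)
{
	unsigned int tests[] = { 0, 1, 0xFF, 0xDEADBEEF, 0xFFFFFFFF };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		assert(hweight32(tests[i]) ==
		       (unsigned int)__builtin_popcount(tests[i]));
		printf("hweight32(%#x) = %u\n", tests[i], hweight32(tests[i]));
	}
	return 0;
}
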
index 94de3e48b4909a03a7e7037f779074e5bf31ff8a..1bca3a9f2b16bc91670f731e05564680d2f12a10 100644 (file)
@@ -389,7 +389,6 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &machine->threads);
-               machine->last_match = th;
 
                /*
                 * We have to initialize map_groups separately
@@ -400,9 +399,12 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
                 * leader and that would screw up the rb tree.
                 */
                if (thread__init_map_groups(th, machine)) {
+                       rb_erase(&th->rb_node, &machine->threads);
                        thread__delete(th);
                        return NULL;
                }
+
+               machine->last_match = th;
        }
 
        return th;
index 6951a9d42339ee089c67dd068622f41c64f4670b..0e42438b1e593c0e6369186d84cc1ccad76a0093 100644 (file)
@@ -116,6 +116,22 @@ struct thread;
 #define map__for_each_symbol(map, pos, n)      \
        dso__for_each_symbol(map->dso, pos, n, map->type)
 
+/* map__for_each_symbol_by_name - iterate over the symbols in the given map
+ *                                that have the given name
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @sym_name: the symbol name
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @filter: to use when loading the DSO
+ */
+#define __map__for_each_symbol_by_name(map, sym_name, pos, filter)     \
+       for (pos = map__find_symbol_by_name(map, sym_name, filter);     \
+            pos && strcmp(pos->name, sym_name) == 0;           \
+            pos = symbol__next_by_name(pos))
+
+#define map__for_each_symbol_by_name(map, sym_name, pos)               \
+       __map__for_each_symbol_by_name(map, sym_name, (pos), NULL)
+
 typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
 
 void map__init(struct map *map, enum map_type type,
index 28eb1417cb2a3fc5d3acebc2280cf37ac79ad778..919937eb0be2b643e93bfe7fc983e7131074deeb 100644 (file)
@@ -446,7 +446,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
        }
 
        for (i = 0; i < ntevs; i++) {
-               if (tevs[i].point.address) {
+               if (tevs[i].point.address && !tevs[i].point.retprobe) {
                        tmp = strdup(reloc_sym->name);
                        if (!tmp)
                                return -ENOMEM;
@@ -495,9 +495,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
        }
 
        if (ntevs == 0) {       /* No error but failed to find probe point. */
-               pr_warning("Probe point '%s' not found.\n",
+               pr_warning("Probe point '%s' not found in debuginfo.\n",
                           synthesize_perf_probe_point(&pev->point));
-               return -ENOENT;
+               if (need_dwarf)
+                       return -ENOENT;
+               return 0;
        }
        /* Error path : ntevs < 0 */
        pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
@@ -2050,9 +2052,11 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
        pr_debug("Writing event: %s\n", buf);
        if (!probe_event_dry_run) {
                ret = write(fd, buf, strlen(buf));
-               if (ret <= 0)
+               if (ret <= 0) {
+                       ret = -errno;
                        pr_warning("Failed to write event: %s\n",
                                   strerror_r(errno, sbuf, sizeof(sbuf)));
+               }
        }
        free(buf);
        return ret;
@@ -2189,18 +2193,17 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
        return ret;
 }
 
-static char *looking_function_name;
-static int num_matched_functions;
-
-static int probe_function_filter(struct map *map __maybe_unused,
-                                     struct symbol *sym)
+static int find_probe_functions(struct map *map, char *name)
 {
-       if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
-           strcmp(looking_function_name, sym->name) == 0) {
-               num_matched_functions++;
-               return 0;
+       int found = 0;
+       struct symbol *sym;
+
+       map__for_each_symbol_by_name(map, name, sym) {
+               if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL)
+                       found++;
        }
-       return 1;
+
+       return found;
 }
 
 #define strdup_or_goto(str, label)     \
@@ -2218,10 +2221,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
        struct kmap *kmap = NULL;
        struct ref_reloc_sym *reloc_sym = NULL;
        struct symbol *sym;
-       struct rb_node *nd;
        struct probe_trace_event *tev;
        struct perf_probe_point *pp = &pev->point;
        struct probe_trace_point *tp;
+       int num_matched_functions;
        int ret, i;
 
        /* Init maps of given executable or kernel */
@@ -2238,10 +2241,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
         * Load matched symbols: Since the different local symbols may have
         * same name but different addresses, this lists all the symbols.
         */
-       num_matched_functions = 0;
-       looking_function_name = pp->function;
-       ret = map__load(map, probe_function_filter);
-       if (ret || num_matched_functions == 0) {
+       num_matched_functions = find_probe_functions(map, pp->function);
+       if (num_matched_functions == 0) {
                pr_err("Failed to find symbol %s in %s\n", pp->function,
                        target ? : "kernel");
                ret = -ENOENT;
@@ -2253,7 +2254,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
                goto out;
        }
 
-       if (!pev->uprobes) {
+       if (!pev->uprobes && !pp->retprobe) {
                kmap = map__kmap(map);
                reloc_sym = kmap->ref_reloc_sym;
                if (!reloc_sym) {
@@ -2271,7 +2272,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
        }
 
        ret = 0;
-       map__for_each_symbol(map, sym, nd) {
+
+       map__for_each_symbol_by_name(map, pp->function, sym) {
                tev = (*tevs) + ret;
                tp = &tev->point;
                if (ret == num_matched_functions) {
index c7918f83b300086649f522bc5f663d9a5a88a5dc..b5247d777f0e9348d1b77e3f33813c5b7713bce4 100644 (file)
@@ -989,8 +989,24 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
        int ret = 0;
 
 #if _ELFUTILS_PREREQ(0, 142)
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       GElf_Shdr shdr;
+
        /* Get the call frame information from this dwarf */
-       pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg));
+       elf = dwarf_getelf(dbg->dbg);
+       if (elf == NULL)
+               return -EINVAL;
+
+       if (gelf_getehdr(elf, &ehdr) == NULL)
+               return -EINVAL;
+
+       if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
+           shdr.sh_type == SHT_PROGBITS) {
+               pf->cfi = dwarf_getcfi_elf(elf);
+       } else {
+               pf->cfi = dwarf_getcfi(dbg->dbg);
+       }
 #endif
 
        off = 0;
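
elf_section_by_name() here is a perf-internal helper; with raw libelf the equivalent lookup walks the section headers and matches names through the section-header string table. A hedged sketch of that walk (assuming elfutils libelf; error handling trimmed):

#include <fcntl.h>
#include <gelf.h>
#include <libelf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Roughly what a section-by-name helper does with raw libelf. */
static Elf_Scn *section_by_name(Elf *elf, const char *want, GElf_Shdr *shdr)
{
	size_t shstrndx;
	Elf_Scn *scn = NULL;

	if (elf_getshdrstrndx(elf, &shstrndx))	/* index of .shstrtab */
		return NULL;

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *name;

		if (gelf_getshdr(scn, shdr) == NULL)
			continue;
		name = elf_strptr(elf, shstrndx, shdr->sh_name);
		if (name && strcmp(name, want) == 0)
			return scn;	/* caller checks shdr->sh_type etc. */
	}
	return NULL;
}

int main(void)
{
	GElf_Shdr shdr;
	int fd = open("/proc/self/exe", O_RDONLY);
	Elf *elf;

	elf_version(EV_CURRENT);	/* mandatory libelf handshake */
	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (elf && section_by_name(elf, ".eh_frame", &shdr))
		printf(".eh_frame found, sh_type=%u\n", (unsigned)shdr.sh_type);
	if (elf)
		elf_end(elf);
	close(fd);
	return 0;
}
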
index 16a475a7d492177623062143434488116cdf2a38..6c6a6953fa93fa5b4fe92229df7613bbe996bec9 100644 (file)
@@ -10,7 +10,7 @@ util/ctype.c
 util/evlist.c
 util/evsel.c
 util/cpumap.c
-util/hweight.c
+../../lib/hweight.c
 util/thread_map.c
 util/util.c
 util/xyarray.c
index c24c5b83156cd92ec5ba5c457153e43706b81424..a194702a0a2f5af700a3177832823502e4d383d7 100644 (file)
@@ -396,6 +396,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
                                            const char *name)
 {
        struct rb_node *n;
+       struct symbol_name_rb_node *s;
 
        if (symbols == NULL)
                return NULL;
@@ -403,7 +404,6 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
        n = symbols->rb_node;
 
        while (n) {
-               struct symbol_name_rb_node *s;
                int cmp;
 
                s = rb_entry(n, struct symbol_name_rb_node, rb_node);
@@ -414,10 +414,24 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
                else if (cmp > 0)
                        n = n->rb_right;
                else
-                       return &s->sym;
+                       break;
        }
 
-       return NULL;
+       if (n == NULL)
+               return NULL;
+
+       /* return first symbol that has same name (if any) */
+       for (n = rb_prev(n); n; n = rb_prev(n)) {
+               struct symbol_name_rb_node *tmp;
+
+               tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
+               if (strcmp(tmp->sym.name, s->sym.name))
+                       break;
+
+               s = tmp;
+       }
+
+       return &s->sym;
 }
 
 struct symbol *dso__find_symbol(struct dso *dso,
@@ -436,6 +450,17 @@ struct symbol *dso__next_symbol(struct symbol *sym)
        return symbols__next(sym);
 }
 
+struct symbol *symbol__next_by_name(struct symbol *sym)
+{
+       struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
+       struct rb_node *n = rb_next(&s->rb_node);
+
+       return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
+}
+
+ /*
  * Returns the first symbol that matches @name.
+  */
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
                                        const char *name)
 {
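
The rb_prev() walk above ensures lookups land on the first of a run of identically named symbols rather than whichever node the binary search happened to hit. The same idea on a sorted array, as an illustrative sketch:

#include <stdio.h>
#include <string.h>

/* Find any match by binary search, then back up to the first entry
 * carrying the same key; returns -1 if the name is absent. */
static int find_first(const char **syms, int n, const char *name)
{
	int lo = 0, hi = n - 1, hit = -1;

	while (lo <= hi) {	/* ordinary search; stops on any match */
		int mid = (lo + hi) / 2;
		int cmp = strcmp(name, syms[mid]);

		if (cmp < 0)
			hi = mid - 1;
		else if (cmp > 0)
			lo = mid + 1;
		else {
			hit = mid;
			break;
		}
	}
	while (hit > 0 && strcmp(syms[hit - 1], name) == 0)
		hit--;	/* mirrors the rb_prev() loop above */
	return hit;
}

int main(void)
{
	const char *syms[] = { "free", "malloc", "malloc", "malloc", "xmalloc" };
	int n = sizeof(syms) / sizeof(syms[0]);

	printf("first \"malloc\" at index %d\n", find_first(syms, n, "malloc"));
	return 0;
}
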
index 9d602e9c6f590f73eb9413f53bbbceeada64b20d..1650dcb3a67bc3fddff40c93c6fdf6a1122d185a 100644 (file)
@@ -231,6 +231,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
                                u64 addr);
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
                                        const char *name);
+struct symbol *symbol__next_by_name(struct symbol *sym);
 
 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
 struct symbol *dso__next_symbol(struct symbol *sym);
index 371219a6daf1cd8209687115bca9a67f7ffc3209..6edf535f65c23428b4982fb651ba3df997d55dfb 100644 (file)
@@ -185,6 +185,28 @@ static u64 elf_section_offset(int fd, const char *name)
        return offset;
 }
 
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int elf_is_exec(int fd, const char *name)
+{
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       int retval = 0;
+
+       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+       if (elf == NULL)
+               return 0;
+       if (gelf_getehdr(elf, &ehdr) == NULL)
+               goto out;
+
+       retval = (ehdr.e_type == ET_EXEC);
+
+out:
+       elf_end(elf);
+       pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval);
+       return retval;
+}
+#endif
+
 struct table_entry {
        u32 start_ip_offset;
        u32 fde_offset;
@@ -322,8 +344,12 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
 #ifndef NO_LIBUNWIND_DEBUG_FRAME
        /* Check the .debug_frame section for unwinding info */
        if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+               int fd = dso__data_fd(map->dso, ui->machine);
+               int is_exec = elf_is_exec(fd, map->dso->name);
+               unw_word_t base = is_exec ? 0 : map->start;
+
                memset(&di, 0, sizeof(di));
-               if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+               if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name,
                                           map->start, map->end))
                        return dwarf_search_unwind_table(as, ip, &di, pi,
                                                         need_unwind_info, arg);
index d273624c93a642544f8ba2b31d45cebbc505dc52..e238c9559caf9a7757d2d389e0bda57cd73229a8 100644 (file)
@@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags,
 }
 
 static int check_execveat_invoked_rc(int fd, const char *path, int flags,
-                                    int expected_rc)
+                                    int expected_rc, int expected_rc2)
 {
        int status;
        int rc;
@@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
                        child, status);
                return 1;
        }
-       if (WEXITSTATUS(status) != expected_rc) {
-               printf("[FAIL] (child %d exited with %d not %d)\n",
-                       child, WEXITSTATUS(status), expected_rc);
+       if ((WEXITSTATUS(status) != expected_rc) &&
+           (WEXITSTATUS(status) != expected_rc2)) {
+               printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
+                       child, WEXITSTATUS(status), expected_rc, expected_rc2);
                return 1;
        }
        printf("[OK]\n");
@@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
 
 static int check_execveat(int fd, const char *path, int flags)
 {
-       return check_execveat_invoked_rc(fd, path, flags, 99);
+       return check_execveat_invoked_rc(fd, path, flags, 99, 99);
 }
 
 static char *concat(const char *left, const char *right)
@@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
         * Execute as a long pathname relative to ".".  If this is a script,
         * the interpreter will launch but fail to open the script because its
         * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
+        *
+        * The failure code is usually 127 (POSIX: "If a command is not found,
+        * the exit status shall be 127."), but some systems give 126 (POSIX:
+        * "If the command name is found, but it is not an executable utility,
+        * the exit status shall be 126."), so allow either.
         */
        if (is_script)
-               fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127);
+               fail += check_execveat_invoked_rc(dot_dfd, longpath, 0,
+                                                 127, 126);
        else
                fail += check_execveat(dot_dfd, longpath, 0);
 
index 94dae65eea4183b43bf3e52bf9bf22dc9c0d4471..8519e9ee97e3d3e4a344e798cabf5b38edfff602 100644 (file)
@@ -536,10 +536,9 @@ int main(int argc, char *argv[])
 {
        struct mq_attr attr;
        char *option, *next_option;
-       int i, cpu;
+       int i, cpu, rc;
        struct sigaction sa;
        poptContext popt_context;
-       char rc;
        void *retval;
 
        main_thread = pthread_self();
index 4c4b1f631ecf61f6e3048d746c23be39ea4f2ef9..077828c889f1377886b98c93349919d55ccb57a2 100644 (file)
@@ -7,7 +7,7 @@ BINARIES += transhuge-stress
 
 all: $(BINARIES)
 %: %.c
-       $(CC) $(CFLAGS) -o $@ $^
+       $(CC) $(CFLAGS) -o $@ $^ -lrt
 
 run_tests: all
        @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)